diff --git a/cmd/configschema/go.mod b/cmd/configschema/go.mod index 3796f3c74ae8..d6e678bf764e 100644 --- a/cmd/configschema/go.mod +++ b/cmd/configschema/go.mod @@ -8,8 +8,8 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib v0.48.0 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.48.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d go.uber.org/multierr v1.8.0 golang.org/x/mod v0.5.1 golang.org/x/text v0.3.7 @@ -188,7 +188,7 @@ require ( github.com/karrick/godirwalk v1.16.1 // indirect github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect github.com/klauspost/compress v1.15.1 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b // indirect github.com/leoluk/perflib_exporter v0.1.0 // indirect github.com/lib/pq v1.10.5 // indirect @@ -440,6 +440,7 @@ require ( go.etcd.io/bbolt v1.3.6 // indirect go.mongodb.org/atlas v0.15.0 // indirect go.opencensus.io v0.23.0 // indirect + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect @@ -802,3 +803,5 @@ exclude github.com/StackExchange/wmi v1.2.0 // see https://github.com/distribution/distribution/issues/3590 exclude github.com/docker/distribution v2.8.0+incompatible + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/cmd/configschema/go.sum b/cmd/configschema/go.sum index e10747c438a9..25fda4f6d7fa 100644 --- a/cmd/configschema/go.sum +++ b/cmd/configschema/go.sum @@ -82,8 +82,8 @@ code.cloudfoundry.org/rfc5424 v0.0.0-20201103192249-000122071b78 h1:mrZQaZmuDIPh code.cloudfoundry.org/rfc5424 v0.0.0-20201103192249-000122071b78/go.mod h1:tkZo8GtzBjySJ7USvxm4E36lNQw1D3xM6oKHGqdaAJ4= collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= contrib.go.opencensus.io/exporter/prometheus v0.3.0/go.mod h1:rpCPVQKhiyH8oomWgm34ZmgIdZa8OVYO5WAIygPbBBE= -contrib.go.opencensus.io/exporter/prometheus v0.4.0 h1:0QfIkj9z/iVZgK31D9H9ohjjIDApI2GOPScCKwxedbs= -contrib.go.opencensus.io/exporter/prometheus v0.4.0/go.mod h1:o7cosnyfuPVK0tB8q0QmaQNhGnptITnPQB+z1+qeFB0= +contrib.go.opencensus.io/exporter/prometheus v0.4.1 h1:oObVeKo2NxpdF/fIfrPsNj6K0Prg0R0mHM+uANlYMiM= +contrib.go.opencensus.io/exporter/prometheus v0.4.1/go.mod h1:t9wvfitlUjGXG2IXAZsuFq26mDGid/JwCEXp+gTG/9U= contrib.go.opencensus.io/exporter/stackdriver v0.13.11 h1:YzmWJ2OT2K3ouXyMm5FmFQPoDs5TfLjx6Xn5x5CLN0I= contrib.go.opencensus.io/exporter/stackdriver v0.13.11/go.mod h1:I5htMbyta491eUxufwwZPQdcKvvgzMB4O9ni41YnIM8= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= @@ -1539,8 +1539,8 @@ github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM52 github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod 
h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= github.com/klauspost/crc32 v1.2.0/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b h1:iNjcivnc6lhbvJA3LD622NPrUponluJrBWPIwGG/3Bg= github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -1962,6 +1962,7 @@ github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU= github.com/prometheus/client_golang v1.10.0/go.mod h1:WJM3cc3yu7XKBKa/I8WeZm+V3eltZnBwfENSU7mdogU= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= @@ -2099,7 +2100,6 @@ github.com/shirou/gopsutil v3.21.10+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMT github.com/shirou/gopsutil/v3 v3.21.1/go.mod h1:igHnfak0qnw1biGeI2qKQvu0ZkwvEkUcCLlYhZzdr/4= github.com/shirou/gopsutil/v3 v3.21.6/go.mod h1:JfVbDpIBLVzT8oKbvMg9P3wEIMDDpVn+LwHTKj0ST88= github.com/shirou/gopsutil/v3 v3.21.9/go.mod h1:YWp/H8Qs5fVmf17v7JNZzA0mPJ+mS2e9JdiUF9LlKzQ= -github.com/shirou/gopsutil/v3 v3.22.2/go.mod h1:WapW1AOOPlHyXr+yOyw3uYx36enocrtSoSBy0L5vUHY= github.com/shirou/gopsutil/v3 v3.22.3 h1:UebRzEomgMpv61e3hgD1tGooqX5trFbdU/ehphbHd00= github.com/shirou/gopsutil/v3 v3.22.3/go.mod h1:D01hZJ4pVHPpCTZ3m3T2+wDF2YAGfd+H4ifUguaQzHM= github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= @@ -2443,13 +2443,16 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opentelemetry.io/collector v0.28.0/go.mod h1:AP/BTXwo1eedoJO7V+HQ68CSvJU1lcdqOzJCgt1VsNs= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= go.opentelemetry.io/collector/model v0.44.0/go.mod 
h1:4jo1R8uBDspLCxUGhQ0k3v/EFXFbW7s0AIy3LuGLbcU= go.opentelemetry.io/collector/model v0.45.0/go.mod h1:uyiyyq8lV45zrJ94MnLip26sorfNLP6J9XmOvaEmy7w= go.opentelemetry.io/collector/model v0.46.0/go.mod h1:uyiyyq8lV45zrJ94MnLip26sorfNLP6J9XmOvaEmy7w= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib v0.20.0 h1:ubFQUn0VCZ0gPwIoJfBJVpeBlyRMxu8Mm/huKWYd9p0= go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E= @@ -2486,8 +2489,8 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.3.0/go.mod h1 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.4.1/go.mod h1:c6E4V3/U+miqjs/8l950wggHGL1qzlp0Ypj9xoGrPqo= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.3.0/go.mod h1:QNX1aly8ehqqX1LEa6YniTU7VY9I6R3X/oPxhGdTceE= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.4.1/go.mod h1:VwYo0Hak6Efuy0TXsZs8o1hnV3dHDPNtDbycG0hI8+M= -go.opentelemetry.io/otel/exporters/prometheus v0.28.0/go.mod h1:nN2uGmk/rLmcbPTaZakIMqYH2Q0T8V1sOnKOHe/HLH0= go.opentelemetry.io/otel/exporters/prometheus v0.29.0 h1:jOrFr8pCPj52GCPNq3qd69SEug3QmqDJTzbrefUxkpw= +go.opentelemetry.io/otel/exporters/prometheus v0.29.0/go.mod h1:Er2VVJQZbHysogooLNchdZ3MLYoI7+d15mHmrRlRJCU= go.opentelemetry.io/otel/internal/metric v0.23.0/go.mod h1:z+RPiDJe30YnCrOhFGivwBS+DU1JU/PiLKkk4re2DNY= go.opentelemetry.io/otel/internal/metric v0.24.0/go.mod h1:PSkQG+KuApZjBpC6ea6082ZrWUUy/w132tJ/LOU3TXk= go.opentelemetry.io/otel/internal/metric v0.27.0/go.mod h1:n1CVxRqKqYZtqyTh9U/onvKapPGv7y/rpyOTI+LFNzw= @@ -2503,14 +2506,13 @@ go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m go.opentelemetry.io/otel/sdk v1.0.1/go.mod h1:HrdXne+BiwsOHYYkBE5ysIcv2bvdZstxzmCQhxTcZkI= go.opentelemetry.io/otel/sdk v1.3.0/go.mod h1:rIo4suHNhQwBIPg9axF8V9CA72Wz2mKF1teNrup8yzs= go.opentelemetry.io/otel/sdk v1.4.1/go.mod h1:NBwHDgDIBYjwK2WNu1OPgsIc2IJzmBXNnvIJxJc8BpE= -go.opentelemetry.io/otel/sdk v1.6.0/go.mod h1:PjLRUfDsoPy0zl7yrDGSUqjj43tL7rEtFdCEiGlxXRM= go.opentelemetry.io/otel/sdk v1.6.1/go.mod h1:IVYrddmFZ+eJqu2k38qD3WezFR2pymCzm8tdxyh3R4E= go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/sdk v1.6.3/go.mod h1:A4iWF7HTXa+GWL/AaqESz28VuSBIcZ+0CV+IzJ5NMiQ= go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE= go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE= -go.opentelemetry.io/otel/sdk/metric v0.28.0/go.mod h1:DqJmT0ovBgoW6TJ8CAQyTnwxZPIp3KWtCiDDZ1uHAzU= go.opentelemetry.io/otel/sdk/metric v0.29.0 h1:OCEp2igPFXQrGxSR/nwd/bDjkPlPlOVjIULA/ob0dNw= +go.opentelemetry.io/otel/sdk/metric v0.29.0/go.mod 
h1:IFkFNKI8Gq8zBdqOKdODCL9+LInBZLXaGpqSIKphNuU= go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= go.opentelemetry.io/otel/trace v1.0.0-RC3/go.mod h1:VUt2TUYd8S2/ZRX09ZDFZQwn2RqfMB5MzO17jBojGxo= go.opentelemetry.io/otel/trace v1.0.0/go.mod h1:PXTWqayeFUlJV1YDNhsJYB184+IvAH814St6o6ajzIs= diff --git a/cmd/configschema/resolver_test.go b/cmd/configschema/resolver_test.go index 6b2abdec4916..974d3332883f 100644 --- a/cmd/configschema/resolver_test.go +++ b/cmd/configschema/resolver_test.go @@ -23,7 +23,7 @@ import ( "testing" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry" ) @@ -38,7 +38,7 @@ func TestPackageDirLocal(t *testing.T) { } func TestPackageDirError(t *testing.T) { - pkg := pdata.NewSum() + pkg := pmetric.NewSum() pkgType := reflect.ValueOf(pkg).Type() srcRoot := "test/fail" dr := NewDirResolver(srcRoot, DefaultModule) diff --git a/cmd/mdatagen/go.mod b/cmd/mdatagen/go.mod index bf9bc8f3b43f..ed7c3312b9b7 100644 --- a/cmd/mdatagen/go.mod +++ b/cmd/mdatagen/go.mod @@ -7,15 +7,15 @@ require ( github.com/go-playground/universal-translator v0.18.0 github.com/go-playground/validator/v10 v10.10.1 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 ) require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/fsnotify/fsnotify v1.5.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/leodido/go-urn v1.2.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect @@ -23,13 +23,14 @@ require ( github.com/pelletier/go-toml v1.9.4 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spf13/cast v1.4.1 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect go.uber.org/zap v1.21.0 // indirect golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3 // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect + golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 // indirect golang.org/x/text v0.3.7 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/cmd/mdatagen/go.sum b/cmd/mdatagen/go.sum index 5e606aa701e9..472cde00d4f3 100644 --- a/cmd/mdatagen/go.sum +++ b/cmd/mdatagen/go.sum @@ -73,8 +73,8 @@ github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= 
-github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= @@ -122,8 +122,6 @@ github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUA github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -136,10 +134,10 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -198,8 +196,8 @@ golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
+golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= diff --git a/cmd/mdatagen/loader.go b/cmd/mdatagen/loader.go index c08b9d8d470f..3d7a7dcb3360 100644 --- a/cmd/mdatagen/loader.go +++ b/cmd/mdatagen/loader.go @@ -26,7 +26,7 @@ import ( "github.com/go-playground/validator/v10/non-standard/validators" en_translations "github.com/go-playground/validator/v10/translations/en" "go.opentelemetry.io/collector/config/mapprovider/filemapprovider" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" ) type metricName string @@ -52,24 +52,24 @@ func (mn attributeName) RenderUnexported() (string, error) { // ValueType defines an attribute value type. type ValueType struct { // ValueType is type of the metric number, options are "double", "int". - ValueType pdata.ValueType + ValueType pcommon.ValueType } // UnmarshalText implements the encoding.TextUnmarshaler interface. func (mvt *ValueType) UnmarshalText(text []byte) error { switch vtStr := string(text); vtStr { case "": - mvt.ValueType = pdata.ValueTypeEmpty + mvt.ValueType = pcommon.ValueTypeEmpty case "string": - mvt.ValueType = pdata.ValueTypeString + mvt.ValueType = pcommon.ValueTypeString case "int": - mvt.ValueType = pdata.ValueTypeInt + mvt.ValueType = pcommon.ValueTypeInt case "double": - mvt.ValueType = pdata.ValueTypeDouble + mvt.ValueType = pcommon.ValueTypeDouble case "bool": - mvt.ValueType = pdata.ValueTypeDouble + mvt.ValueType = pcommon.ValueTypeDouble case "bytes": - mvt.ValueType = pdata.ValueTypeDouble + mvt.ValueType = pcommon.ValueTypeDouble default: return fmt.Errorf("invalid type: %q", vtStr) } @@ -84,15 +84,15 @@ func (mvt ValueType) String() string { // Primitive returns name of primitive type for the ValueType. 
func (mvt ValueType) Primitive() string { switch mvt.ValueType { - case pdata.ValueTypeString: + case pcommon.ValueTypeString: return "string" - case pdata.ValueTypeInt: + case pcommon.ValueTypeInt: return "int64" - case pdata.ValueTypeDouble: + case pcommon.ValueTypeDouble: return "float64" - case pdata.ValueTypeBool: + case pcommon.ValueTypeBool: return "bool" - case pdata.ValueTypeBytes: + case pcommon.ValueTypeBytes: return "[]byte" default: return "" diff --git a/cmd/mdatagen/loader_test.go b/cmd/mdatagen/loader_test.go index 4995a59b89be..e73e9873975e 100644 --- a/cmd/mdatagen/loader_test.go +++ b/cmd/mdatagen/loader_test.go @@ -19,7 +19,7 @@ import ( "testing" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" ) func Test_loadMetadata(t *testing.T) { @@ -53,7 +53,7 @@ func Test_loadMetadata(t *testing.T) { ExtendedDocumentation: "Additional information on CPU Time can be found [here](https://en.wikipedia.org/wiki/CPU_time).", Unit: "s", Sum: &sum{ - MetricValueType: MetricValueType{pdata.MetricValueTypeDouble}, + MetricValueType: MetricValueType{pmetric.MetricValueTypeDouble}, Aggregated: Aggregated{Aggregation: "cumulative"}, Mono: Mono{Monotonic: true}, }, @@ -64,7 +64,7 @@ func Test_loadMetadata(t *testing.T) { Description: "Percentage of CPU time broken down by different states.", Unit: "1", Gauge: &gauge{ - MetricValueType: MetricValueType{pdata.MetricValueTypeDouble}, + MetricValueType: MetricValueType{pmetric.MetricValueTypeDouble}, }, Attributes: []attributeName{"enumAttribute"}, }, diff --git a/cmd/mdatagen/metricdata.go b/cmd/mdatagen/metricdata.go index b12c4add3304..47d0be608c6b 100644 --- a/cmd/mdatagen/metricdata.go +++ b/cmd/mdatagen/metricdata.go @@ -17,7 +17,7 @@ package main import ( "fmt" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" ) var ( @@ -45,11 +45,11 @@ type Aggregated struct { func (agg Aggregated) Type() string { switch agg.Aggregation { case "delta": - return "pdata.MetricAggregationTemporalityDelta" + return "pmetric.MetricAggregationTemporalityDelta" case "cumulative": - return "pdata.MetricAggregationTemporalityCumulative" + return "pmetric.MetricAggregationTemporalityCumulative" default: - return "pdata.MetricAggregationTemporalityUnknown" + return "pmetric.MetricAggregationTemporalityUnknown" } } @@ -62,16 +62,16 @@ type Mono struct { // MetricValueType defines the metric number type. type MetricValueType struct { // ValueType is type of the metric number, options are "double", "int". - ValueType pdata.MetricValueType `validate:"required"` + ValueType pmetric.MetricValueType `validate:"required"` } // UnmarshalText implements the encoding.TextUnmarshaler interface. func (mvt *MetricValueType) UnmarshalText(text []byte) error { switch vtStr := string(text); vtStr { case "int": - mvt.ValueType = pdata.MetricValueTypeInt + mvt.ValueType = pmetric.MetricValueTypeInt case "double": - mvt.ValueType = pdata.MetricValueTypeDouble + mvt.ValueType = pmetric.MetricValueTypeDouble default: return fmt.Errorf("invalid value_type: %q", vtStr) } @@ -86,9 +86,9 @@ func (mvt MetricValueType) String() string { // BasicType returns name of a golang basic type for the datapoint type. 
func (mvt MetricValueType) BasicType() string { switch mvt.ValueType { - case pdata.MetricValueTypeInt: + case pmetric.MetricValueTypeInt: return "int64" - case pdata.MetricValueTypeDouble: + case pmetric.MetricValueTypeDouble: return "float64" default: return "" diff --git a/cmd/mdatagen/metricdata_test.go b/cmd/mdatagen/metricdata_test.go index 3b2269b55baf..423f18ecfd99 100644 --- a/cmd/mdatagen/metricdata_test.go +++ b/cmd/mdatagen/metricdata_test.go @@ -39,11 +39,11 @@ func TestMetricData(t *testing.T) { func TestAggregation(t *testing.T) { delta := Aggregated{Aggregation: "delta"} - assert.Equal(t, "pdata.MetricAggregationTemporalityDelta", delta.Type()) + assert.Equal(t, "pmetric.MetricAggregationTemporalityDelta", delta.Type()) cumulative := Aggregated{Aggregation: "cumulative"} - assert.Equal(t, "pdata.MetricAggregationTemporalityCumulative", cumulative.Type()) + assert.Equal(t, "pmetric.MetricAggregationTemporalityCumulative", cumulative.Type()) unknown := Aggregated{Aggregation: ""} - assert.Equal(t, "pdata.MetricAggregationTemporalityUnknown", unknown.Type()) + assert.Equal(t, "pmetric.MetricAggregationTemporalityUnknown", unknown.Type()) } diff --git a/cmd/mdatagen/metrics.tmpl b/cmd/mdatagen/metrics.tmpl index 6684b8853bea..9cf572306db4 100644 --- a/cmd/mdatagen/metrics.tmpl +++ b/cmd/mdatagen/metrics.tmpl @@ -4,7 +4,7 @@ package {{ .Package }} import ( "go.opentelemetry.io/collector/config" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" ) // Type is the component type name. @@ -13,14 +13,14 @@ const Type config.Type = "{{ .Name }}" // MetricIntf is an interface to generically interact with generated metric. type MetricIntf interface { Name() string - New() pdata.Metric - Init(metric pdata.Metric) + New() pmetric.Metric + Init(metric pmetric.Metric) } // Intentionally not exposing this so that it is opaque and can change freely. type metricImpl struct { name string - initFunc func(pdata.Metric) + initFunc func(pmetric.Metric) } // Name returns the metric name. @@ -29,14 +29,14 @@ func (m *metricImpl) Name() string { } // New creates a metric object preinitialized. -func (m *metricImpl) New() pdata.Metric { - metric := pdata.NewMetric() +func (m *metricImpl) New() pmetric.Metric { + metric := pmetric.NewMetric() m.Init(metric) return metric } // Init initializes the provided metric object. 
-func (m *metricImpl) Init(metric pdata.Metric) { +func (m *metricImpl) Init(metric pmetric.Metric) { m.initFunc(metric) } @@ -72,11 +72,11 @@ var Metrics = &metricStruct{ {{- range $name, $metric := .Metrics }} &metricImpl{ "{{ $name }}", - func(metric pdata.Metric) { + func(metric pmetric.Metric) { metric.SetName("{{ $name }}") metric.SetDescription("{{ $metric.Description }}") metric.SetUnit("{{ $metric.Unit }}") - metric.SetDataType(pdata.MetricDataType{{ $metric.Data.Type }}) + metric.SetDataType(pmetric.MetricDataType{{ $metric.Data.Type }}) {{- if $metric.Data.HasMonotonic }} metric.{{ $metric.Data.Type }}().SetIsMonotonic({{ $metric.Data.Monotonic }}) {{- end }} diff --git a/cmd/mdatagen/metrics_v2.tmpl b/cmd/mdatagen/metrics_v2.tmpl index 4f0e43df69fd..7fbf69c16cec 100644 --- a/cmd/mdatagen/metrics_v2.tmpl +++ b/cmd/mdatagen/metrics_v2.tmpl @@ -5,7 +5,8 @@ package {{ .Package }} import ( "time" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" {{- if .SemConvVersion }} conventions "go.opentelemetry.io/collector/model/semconv/v{{ .SemConvVersion }}" {{- end }} @@ -35,7 +36,7 @@ func DefaultMetricsSettings() MetricsSettings { {{ range $name, $metric := .Metrics -}} type metric{{ $name.Render }} struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -45,7 +46,7 @@ func (m *metric{{ $name.Render }}) init() { m.data.SetName("{{ $name }}") m.data.SetDescription("{{ $metric.Description }}") m.data.SetUnit("{{ $metric.Unit }}") - m.data.SetDataType(pdata.MetricDataType{{ $metric.Data.Type }}) + m.data.SetDataType(pmetric.MetricDataType{{ $metric.Data.Type }}) {{- if $metric.Data.HasMonotonic }} m.data.{{ $metric.Data.Type }}().SetIsMonotonic({{ $metric.Data.Monotonic }}) {{- end }} @@ -57,7 +58,7 @@ func (m *metric{{ $name.Render }}) init() { {{- end }} } -func (m *metric{{ $name.Render }}) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp +func (m *metric{{ $name.Render }}) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp {{- if $metric.Data.HasMetricValueType }}, val {{ $metric.Data.MetricValueType.BasicType }}{{ end }} {{- range $metric.Attributes -}}, {{ .RenderUnexported }}AttributeValue string {{ end }}) { if !m.settings.Enabled { @@ -70,7 +71,7 @@ func (m *metric{{ $name.Render }}) recordDataPoint(start pdata.Timestamp, ts pda dp.Set{{ $metric.Data.MetricValueType }}Val(val) {{- end }} {{- range $metric.Attributes }} - dp.Attributes().Insert(A.{{ .Render }}, pdata.NewValueString({{ .RenderUnexported }}AttributeValue)) + dp.Attributes().Insert(A.{{ .Render }}, pcommon.NewValueString({{ .RenderUnexported }}AttributeValue)) {{- end }} } @@ -82,7 +83,7 @@ func (m *metric{{ $name.Render }}) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metric{{ $name.Render }}) emit(metrics pdata.MetricSlice) { +func (m *metric{{ $name.Render }}) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.{{ $metric.Data.Type }}().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -93,7 +94,7 @@ func (m *metric{{ $name.Render }}) emit(metrics pdata.MetricSlice) { func newMetric{{ $name.Render }}(settings MetricSettings) metric{{ $name.Render }} { m := metric{{ $name.Render }}{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m @@ -104,10 +105,10 @@ func newMetric{{ $name.Render }}(settings MetricSettings) metric{{ $name.Render // MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations // required to produce metric representation defined in metadata and user settings. type MetricsBuilder struct { - startTime pdata.Timestamp // start time that will be applied to all recorded data points. + startTime pcommon.Timestamp // start time that will be applied to all recorded data points. metricsCapacity int // maximum observed number of metrics per resource. resourceCapacity int // maximum observed number of resource attributes. - metricsBuffer pdata.Metrics // accumulates metrics data before emitting. + metricsBuffer pmetric.Metrics // accumulates metrics data before emitting. {{- range $name, $metric := .Metrics }} metric{{ $name.Render }} metric{{ $name.Render }} {{- end }} @@ -117,7 +118,7 @@ type MetricsBuilder struct { type metricBuilderOption func(*MetricsBuilder) // WithStartTime sets startTime on the metrics builder. -func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { +func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption { return func(mb *MetricsBuilder) { mb.startTime = startTime } @@ -125,8 +126,8 @@ func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ - startTime: pdata.NewTimestampFromTime(time.Now()), - metricsBuffer: pdata.NewMetrics(), + startTime: pcommon.NewTimestampFromTime(time.Now()), + metricsBuffer: pmetric.NewMetrics(), {{- range $name, $metric := .Metrics }} metric{{ $name.Render }}: newMetric{{ $name.Render }}(settings.{{ $name.Render }}), {{- end }} @@ -138,7 +139,7 @@ func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) } // updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity. -func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { +func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) { if mb.metricsCapacity < rm.ScopeMetrics().At(0).Metrics().Len() { mb.metricsCapacity = rm.ScopeMetrics().At(0).Metrics().Len() } @@ -148,12 +149,12 @@ func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { } // ResourceOption applies changes to provided resource. -type ResourceOption func(pdata.Resource) +type ResourceOption func(pcommon.Resource) {{- range $name, $attr := .ResourceAttributes }} // With{{ $name.Render }} sets provided value as "{{ $name }}" attribute for current resource. 
func With{{ $name.Render }}(val {{ $attr.Type.Primitive }}) ResourceOption { - return func(r pdata.Resource) { + return func(r pcommon.Resource) { r.Attributes().Upsert{{ $attr.Type }}("{{ $name }}", val) } } @@ -164,7 +165,7 @@ func With{{ $name.Render }}(val {{ $attr.Type.Primitive }}) ResourceOption { // needs to emit metrics from several resources. Otherwise calling this function is not required, // just `Emit` function can be called instead. Resource attributes should be provided as ResourceOption arguments. func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { - rm := pdata.NewResourceMetrics() + rm := pmetric.NewResourceMetrics() {{- if .SemConvVersion }} rm.SetSchemaUrl(conventions.SchemaURL) {{- end }} @@ -187,16 +188,16 @@ func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { // Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for // recording another set of metrics. This function will be responsible for applying all the transformations required to // produce metric representation defined in metadata and user settings, e.g. delta or cumulative. -func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pdata.Metrics { +func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pmetric.Metrics { mb.EmitForResource(ro...) - metrics := pdata.NewMetrics() + metrics := pmetric.NewMetrics() mb.metricsBuffer.MoveTo(metrics) return metrics } {{ range $name, $metric := .Metrics -}} // Record{{ $name.Render }}DataPoint adds a data point to {{ $name }} metric. -func (mb *MetricsBuilder) Record{{ $name.Render }}DataPoint(ts pdata.Timestamp +func (mb *MetricsBuilder) Record{{ $name.Render }}DataPoint(ts pcommon.Timestamp {{- if $metric.Data.HasMetricValueType }}, val {{ $metric.Data.MetricValueType.BasicType }}{{ end }} {{- range $metric.Attributes -}} , {{ .RenderUnexported }}AttributeValue string{{ end }}) { mb.metric{{ $name.Render }}.recordDataPoint(mb.startTime, ts @@ -208,7 +209,7 @@ func (mb *MetricsBuilder) Record{{ $name.Render }}DataPoint(ts pdata.Timestamp // Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted, // and metrics builder should update its startTime and reset it's internal state accordingly. 
func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { - mb.startTime = pdata.NewTimestampFromTime(time.Now()) + mb.startTime = pcommon.NewTimestampFromTime(time.Now()) for _, op := range options { op(mb) } diff --git a/examples/demo/client/go.mod b/examples/demo/client/go.mod index 8acafe77f928..344cf9a264e6 100644 --- a/examples/demo/client/go.mod +++ b/examples/demo/client/go.mod @@ -32,3 +32,5 @@ require ( google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1 // indirect google.golang.org/protobuf v1.28.0 // indirect ) + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/examples/demo/server/go.mod b/examples/demo/server/go.mod index 18194a0565ab..205557ff0f52 100644 --- a/examples/demo/server/go.mod +++ b/examples/demo/server/go.mod @@ -32,3 +32,5 @@ require ( google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1 // indirect google.golang.org/protobuf v1.28.0 // indirect ) + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/exporter/alibabacloudlogserviceexporter/go.mod b/exporter/alibabacloudlogserviceexporter/go.mod index 5c09555e578f..8b53bde0d615 100644 --- a/exporter/alibabacloudlogserviceexporter/go.mod +++ b/exporter/alibabacloudlogserviceexporter/go.mod @@ -7,19 +7,20 @@ require ( github.com/gogo/protobuf v1.3.2 github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.48.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/zap v1.21.0 ) require ( github.com/cenkalti/backoff v2.2.1+incompatible // indirect - github.com/cenkalti/backoff/v4 v4.1.2 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/go-kit/kit v0.10.0 // indirect - github.com/go-logfmt/logfmt v0.5.0 // indirect + github.com/go-logfmt/logfmt v0.5.1 // indirect github.com/golang/protobuf v1.5.2 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect @@ -27,18 +28,13 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rogpeppe/go-internal v1.8.1 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect - golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect - golang.org/x/text v0.3.7 // indirect - google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect - google.golang.org/grpc v1.45.0 // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect 
google.golang.org/protobuf v1.28.0 // indirect gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect @@ -46,3 +42,5 @@ require ( ) replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal => ../../internal/coreinternal + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/exporter/alibabacloudlogserviceexporter/go.sum b/exporter/alibabacloudlogserviceexporter/go.sum index b3e545b24da0..f70f7dc84bec 100644 --- a/exporter/alibabacloudlogserviceexporter/go.sum +++ b/exporter/alibabacloudlogserviceexporter/go.sum @@ -3,7 +3,6 @@ cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= @@ -14,7 +13,6 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/aliyun/aliyun-log-go-sdk v0.1.29 h1:LOj/gVRUyWi2jMFuYc3Zs5I3VDNn33oj8eTbrzVH/CM= github.com/aliyun/aliyun-log-go-sdk v0.1.29/go.mod h1:aBG0R+MWRTgvlIODQkz+a3/RM9bQYKsmSbKdbIx4vpc= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= @@ -43,22 +41,15 @@ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kB github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= -github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/clbanning/x2j 
v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58 h1:F1EaeKL/ta07PY/k9Os/UFtwERei2/XzGemhpGnBKNg= github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58/go.mod h1:EOBUe0h4xcZ5GoxqC5SDxFQ8gwyZPKQoEzownBlhI80= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -79,9 +70,6 @@ github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4s github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= @@ -101,8 +89,9 @@ github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgO github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= 
+github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -128,7 +117,6 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -165,12 +153,10 @@ github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORR github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -225,8 +211,8 @@ github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQL github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod 
h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -331,7 +317,6 @@ github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+Gx github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rhnvrm/simples3 v0.6.1/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg= github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= @@ -349,9 +334,6 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1 github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= @@ -364,7 +346,6 @@ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= @@ -383,20 +364,21 @@ go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d 
h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -453,11 +435,10 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -490,16 +471,13 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod 
h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -536,10 +514,8 @@ google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRn google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= @@ -553,12 +529,8 @@ google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= 
google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -570,7 +542,6 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= @@ -591,7 +562,6 @@ gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRN gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= diff --git a/exporter/alibabacloudlogserviceexporter/logs_exporter.go b/exporter/alibabacloudlogserviceexporter/logs_exporter.go index 36f97dac0eaf..72986972a0e0 100644 --- a/exporter/alibabacloudlogserviceexporter/logs_exporter.go +++ b/exporter/alibabacloudlogserviceexporter/logs_exporter.go @@ -20,7 +20,7 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/exporter/exporterhelper" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/plog" "go.uber.org/zap" ) @@ -49,7 +49,7 @@ type logServiceLogsSender struct { func (s *logServiceLogsSender) pushLogsData( ctx context.Context, - md pdata.Logs) error { + md plog.Logs) error { var err error slsLogs := logDataToLogService(md) if len(slsLogs) > 0 { diff --git a/exporter/alibabacloudlogserviceexporter/logs_exporter_test.go b/exporter/alibabacloudlogserviceexporter/logs_exporter_test.go index 4378412a1883..1f510abdadd9 100644 --- a/exporter/alibabacloudlogserviceexporter/logs_exporter_test.go +++ b/exporter/alibabacloudlogserviceexporter/logs_exporter_test.go @@ -24,19 +24,20 @@ import ( "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/config" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" ) -func createSimpleLogData(numberOfLogs int) pdata.Logs { - logs := pdata.NewLogs() +func createSimpleLogData(numberOfLogs int) 
plog.Logs { + logs := plog.NewLogs() logs.ResourceLogs().AppendEmpty() // Add an empty ResourceLogs rl := logs.ResourceLogs().AppendEmpty() rl.ScopeLogs().AppendEmpty() // Add an empty ScopeLogs sl := rl.ScopeLogs().AppendEmpty() for i := 0; i < numberOfLogs; i++ { - ts := pdata.Timestamp(int64(i) * time.Millisecond.Nanoseconds()) + ts := pcommon.Timestamp(int64(i) * time.Millisecond.Nanoseconds()) logRecord := sl.LogRecords().AppendEmpty() logRecord.Body().SetStringVal("mylog") logRecord.Attributes().InsertString(conventions.AttributeServiceName, "myapp") diff --git a/exporter/alibabacloudlogserviceexporter/logsdata_to_logservice.go b/exporter/alibabacloudlogserviceexporter/logsdata_to_logservice.go index f3564de2fbed..ea125793a4fa 100644 --- a/exporter/alibabacloudlogserviceexporter/logsdata_to_logservice.go +++ b/exporter/alibabacloudlogserviceexporter/logsdata_to_logservice.go @@ -21,8 +21,9 @@ import ( sls "github.com/aliyun/aliyun-log-go-sdk" "github.com/gogo/protobuf/proto" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" ) const ( @@ -40,7 +41,7 @@ const ( slsLogInstrumentationVersion = "otlp.version" ) -func logDataToLogService(ld pdata.Logs) []*sls.Log { +func logDataToLogService(ld plog.Logs) []*sls.Log { slsLogs := make([]*sls.Log, 0) rls := ld.ResourceLogs() for i := 0; i < rls.Len(); i++ { @@ -64,7 +65,7 @@ func logDataToLogService(ld pdata.Logs) []*sls.Log { return slsLogs } -func resourceToLogContents(resource pdata.Resource) []*sls.LogContent { +func resourceToLogContents(resource pcommon.Resource) []*sls.LogContent { logContents := make([]*sls.LogContent, 3) attrs := resource.Attributes() if hostName, ok := attrs.Get(conventions.AttributeHostName); ok { @@ -92,7 +93,7 @@ func resourceToLogContents(resource pdata.Resource) []*sls.LogContent { } fields := map[string]interface{}{} - attrs.Range(func(k string, v pdata.Value) bool { + attrs.Range(func(k string, v pcommon.Value) bool { if k == conventions.AttributeServiceName || k == conventions.AttributeHostName { return true } @@ -108,7 +109,7 @@ func resourceToLogContents(resource pdata.Resource) []*sls.LogContent { return logContents } -func instrumentationScopeToLogContents(instrumentationScope pdata.InstrumentationScope) []*sls.LogContent { +func instrumentationScopeToLogContents(instrumentationScope pcommon.InstrumentationScope) []*sls.LogContent { logContents := make([]*sls.LogContent, 2) logContents[0] = &sls.LogContent{ Key: proto.String(slsLogInstrumentationName), @@ -121,10 +122,10 @@ func instrumentationScopeToLogContents(instrumentationScope pdata.Instrumentatio return logContents } -func mapLogRecordToLogService(lr pdata.LogRecord, +func mapLogRecordToLogService(lr plog.LogRecord, resourceContents, instrumentationLibraryContents []*sls.LogContent) *sls.Log { - if lr.Body().Type() == pdata.ValueTypeEmpty { + if lr.Body().Type() == pcommon.ValueTypeEmpty { return nil } var slsLog sls.Log @@ -153,7 +154,7 @@ func mapLogRecordToLogService(lr pdata.LogRecord, }) fields := map[string]interface{}{} - lr.Attributes().Range(func(k string, v pdata.Value) bool { + lr.Attributes().Range(func(k string, v pcommon.Value) bool { fields[k] = v.AsString() return true }) diff --git a/exporter/alibabacloudlogserviceexporter/logsdata_to_logservice_test.go b/exporter/alibabacloudlogserviceexporter/logsdata_to_logservice_test.go index 430dbf365da6..7dbe21397492 100644 
--- a/exporter/alibabacloudlogserviceexporter/logsdata_to_logservice_test.go +++ b/exporter/alibabacloudlogserviceexporter/logsdata_to_logservice_test.go @@ -21,23 +21,24 @@ import ( "time" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" ) -func getComplexAttributeValueMap() pdata.Value { - mapVal := pdata.NewValueMap() +func getComplexAttributeValueMap() pcommon.Value { + mapVal := pcommon.NewValueMap() mapValReal := mapVal.MapVal() mapValReal.InsertBool("result", true) mapValReal.InsertString("status", "ok") mapValReal.InsertDouble("value", 1.3) mapValReal.InsertInt("code", 200) mapValReal.InsertNull("null") - arrayVal := pdata.NewValueSlice() + arrayVal := pcommon.NewValueSlice() arrayVal.SliceVal().AppendEmpty().SetStringVal("array") mapValReal.Insert("array", arrayVal) - subMapVal := pdata.NewValueMap() + subMapVal := pcommon.NewValueMap() subMapVal.MapVal().InsertString("data", "hello world") mapValReal.Insert("map", subMapVal) @@ -45,8 +46,8 @@ func getComplexAttributeValueMap() pdata.Value { return mapVal } -func createLogData(numberOfLogs int) pdata.Logs { - logs := pdata.NewLogs() +func createLogData(numberOfLogs int) plog.Logs { + logs := plog.NewLogs() logs.ResourceLogs().AppendEmpty() // Add an empty ResourceLogs rl := logs.ResourceLogs().AppendEmpty() rl.Resource().Attributes().InsertString("resouceKey", "resourceValue") @@ -57,7 +58,7 @@ func createLogData(numberOfLogs int) pdata.Logs { sl.Scope().SetVersion("v0.1.0") for i := 0; i < numberOfLogs; i++ { - ts := pdata.Timestamp(int64(i) * time.Millisecond.Nanoseconds()) + ts := pcommon.Timestamp(int64(i) * time.Millisecond.Nanoseconds()) logRecord := sl.LogRecords().AppendEmpty() switch i { case 0: @@ -75,7 +76,7 @@ func createLogData(numberOfLogs int) pdata.Logs { logRecord.Attributes().Insert("map-value", getComplexAttributeValueMap()) logRecord.Body().SetStringVal("log contents") case 6: - arrayVal := pdata.NewValueSlice() + arrayVal := pcommon.NewValueSlice() arrayVal.SliceVal().AppendEmpty().SetStringVal("array") logRecord.Attributes().Insert("array-value", arrayVal) logRecord.Body().SetStringVal("log contents") diff --git a/exporter/alibabacloudlogserviceexporter/metrics_exporter.go b/exporter/alibabacloudlogserviceexporter/metrics_exporter.go index 38d645d3a1b4..45bc7f9b82ca 100644 --- a/exporter/alibabacloudlogserviceexporter/metrics_exporter.go +++ b/exporter/alibabacloudlogserviceexporter/metrics_exporter.go @@ -20,7 +20,7 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/exporter/exporterhelper" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" ) @@ -49,7 +49,7 @@ type logServiceMetricsSender struct { func (s *logServiceMetricsSender) pushMetricsData( _ context.Context, - md pdata.Metrics, + md pmetric.Metrics, ) error { var err error logs := metricsDataToLogServiceData(s.logger, md) diff --git a/exporter/alibabacloudlogserviceexporter/metricsdata_to_logservice.go b/exporter/alibabacloudlogserviceexporter/metricsdata_to_logservice.go index 3e25cf3cdf84..9e1e9b71ff54 100644 --- a/exporter/alibabacloudlogserviceexporter/metricsdata_to_logservice.go +++ b/exporter/alibabacloudlogserviceexporter/metricsdata_to_logservice.go @@ -21,7 +21,8 @@ import ( sls 
"github.com/aliyun/aliyun-log-go-sdk" "github.com/gogo/protobuf/proto" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" ) @@ -156,9 +157,9 @@ func min(l, r int) int { return r } -func resourceToMetricLabels(labels *KeyValues, resource pdata.Resource) { +func resourceToMetricLabels(labels *KeyValues, resource pcommon.Resource) { attrs := resource.Attributes() - attrs.Range(func(k string, v pdata.Value) bool { + attrs.Range(func(k string, v pcommon.Value) bool { labels.keyValues = append(labels.keyValues, KeyValue{ Key: k, Value: v.AsString(), @@ -167,17 +168,17 @@ func resourceToMetricLabels(labels *KeyValues, resource pdata.Resource) { }) } -func numberMetricsToLogs(name string, data pdata.NumberDataPointSlice, defaultLabels KeyValues) (logs []*sls.Log) { +func numberMetricsToLogs(name string, data pmetric.NumberDataPointSlice, defaultLabels KeyValues) (logs []*sls.Log) { for i := 0; i < data.Len(); i++ { dataPoint := data.At(i) attributeMap := dataPoint.Attributes() labels := defaultLabels.Clone() - attributeMap.Range(func(k string, v pdata.Value) bool { + attributeMap.Range(func(k string, v pcommon.Value) bool { labels.Append(k, v.AsString()) return true }) switch dataPoint.ValueType() { - case pdata.MetricValueTypeInt: + case pmetric.MetricValueTypeInt: logs = append(logs, newMetricLogFromRaw(name, labels, @@ -185,7 +186,7 @@ func numberMetricsToLogs(name string, data pdata.NumberDataPointSlice, defaultLa float64(dataPoint.IntVal()), ), ) - case pdata.MetricValueTypeDouble: + case pmetric.MetricValueTypeDouble: logs = append(logs, newMetricLogFromRaw(name, labels, @@ -198,12 +199,12 @@ func numberMetricsToLogs(name string, data pdata.NumberDataPointSlice, defaultLa return logs } -func doubleHistogramMetricsToLogs(name string, data pdata.HistogramDataPointSlice, defaultLabels KeyValues) (logs []*sls.Log) { +func doubleHistogramMetricsToLogs(name string, data pmetric.HistogramDataPointSlice, defaultLabels KeyValues) (logs []*sls.Log) { for i := 0; i < data.Len(); i++ { dataPoint := data.At(i) attributeMap := dataPoint.Attributes() labels := defaultLabels.Clone() - attributeMap.Range(func(k string, v pdata.Value) bool { + attributeMap.Range(func(k string, v pcommon.Value) bool { labels.Append(k, v.AsString()) return true }) @@ -246,12 +247,12 @@ func doubleHistogramMetricsToLogs(name string, data pdata.HistogramDataPointSlic return logs } -func doubleSummaryMetricsToLogs(name string, data pdata.SummaryDataPointSlice, defaultLabels KeyValues) (logs []*sls.Log) { +func doubleSummaryMetricsToLogs(name string, data pmetric.SummaryDataPointSlice, defaultLabels KeyValues) (logs []*sls.Log) { for i := 0; i < data.Len(); i++ { dataPoint := data.At(i) attributeMap := dataPoint.Attributes() labels := defaultLabels.Clone() - attributeMap.Range(func(k string, v pdata.Value) bool { + attributeMap.Range(func(k string, v pcommon.Value) bool { labels.Append(k, v.AsString()) return true }) @@ -282,17 +283,17 @@ func doubleSummaryMetricsToLogs(name string, data pdata.SummaryDataPointSlice, d return logs } -func metricDataToLogServiceData(md pdata.Metric, defaultLabels KeyValues) (logs []*sls.Log) { +func metricDataToLogServiceData(md pmetric.Metric, defaultLabels KeyValues) (logs []*sls.Log) { switch md.DataType() { - case pdata.MetricDataTypeNone: + case pmetric.MetricDataTypeNone: break - case pdata.MetricDataTypeGauge: + case pmetric.MetricDataTypeGauge: return 
numberMetricsToLogs(md.Name(), md.Gauge().DataPoints(), defaultLabels) - case pdata.MetricDataTypeSum: + case pmetric.MetricDataTypeSum: return numberMetricsToLogs(md.Name(), md.Sum().DataPoints(), defaultLabels) - case pdata.MetricDataTypeHistogram: + case pmetric.MetricDataTypeHistogram: return doubleHistogramMetricsToLogs(md.Name(), md.Histogram().DataPoints(), defaultLabels) - case pdata.MetricDataTypeSummary: + case pmetric.MetricDataTypeSummary: return doubleSummaryMetricsToLogs(md.Name(), md.Summary().DataPoints(), defaultLabels) } return logs @@ -300,7 +301,7 @@ func metricDataToLogServiceData(md pdata.Metric, defaultLabels KeyValues) (logs func metricsDataToLogServiceData( _ *zap.Logger, - md pdata.Metrics, + md pmetric.Metrics, ) (logs []*sls.Log) { resMetrics := md.ResourceMetrics() diff --git a/exporter/alibabacloudlogserviceexporter/metricsdata_to_logservice_test.go b/exporter/alibabacloudlogserviceexporter/metricsdata_to_logservice_test.go index ac073f7c11db..bcc384c2419a 100644 --- a/exporter/alibabacloudlogserviceexporter/metricsdata_to_logservice_test.go +++ b/exporter/alibabacloudlogserviceexporter/metricsdata_to_logservice_test.go @@ -20,14 +20,15 @@ import ( "testing" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" ) func TestMetricDataToLogService(t *testing.T) { logger := zap.NewNop() - md := pdata.NewMetrics() + md := pmetric.NewMetrics() md.ResourceMetrics().AppendEmpty() // Add an empty ResourceMetrics rm := md.ResourceMetrics().AppendEmpty() @@ -47,47 +48,47 @@ func TestMetricDataToLogService(t *testing.T) { noneMetric.SetName("none") intGaugeMetric := metrics.AppendEmpty() - intGaugeMetric.SetDataType(pdata.MetricDataTypeGauge) + intGaugeMetric.SetDataType(pmetric.MetricDataTypeGauge) intGaugeMetric.SetName("int_gauge") intGauge := intGaugeMetric.Gauge() intGaugeDataPoints := intGauge.DataPoints() intGaugeDataPoint := intGaugeDataPoints.AppendEmpty() intGaugeDataPoint.Attributes().InsertString("innerLabel", "innerValue") intGaugeDataPoint.SetIntVal(10) - intGaugeDataPoint.SetTimestamp(pdata.Timestamp(100_000_000)) + intGaugeDataPoint.SetTimestamp(pcommon.Timestamp(100_000_000)) doubleGaugeMetric := metrics.AppendEmpty() - doubleGaugeMetric.SetDataType(pdata.MetricDataTypeGauge) + doubleGaugeMetric.SetDataType(pmetric.MetricDataTypeGauge) doubleGaugeMetric.SetName("double_gauge") doubleGauge := doubleGaugeMetric.Gauge() doubleGaugeDataPoints := doubleGauge.DataPoints() doubleGaugeDataPoint := doubleGaugeDataPoints.AppendEmpty() doubleGaugeDataPoint.Attributes().InsertString("innerLabel", "innerValue") doubleGaugeDataPoint.SetDoubleVal(10.1) - doubleGaugeDataPoint.SetTimestamp(pdata.Timestamp(100_000_000)) + doubleGaugeDataPoint.SetTimestamp(pcommon.Timestamp(100_000_000)) intSumMetric := metrics.AppendEmpty() - intSumMetric.SetDataType(pdata.MetricDataTypeSum) + intSumMetric.SetDataType(pmetric.MetricDataTypeSum) intSumMetric.SetName("int_sum") intSum := intSumMetric.Sum() intSumDataPoints := intSum.DataPoints() intSumDataPoint := intSumDataPoints.AppendEmpty() intSumDataPoint.Attributes().InsertString("innerLabel", "innerValue") intSumDataPoint.SetIntVal(11) - intSumDataPoint.SetTimestamp(pdata.Timestamp(100_000_000)) + intSumDataPoint.SetTimestamp(pcommon.Timestamp(100_000_000)) doubleSumMetric := metrics.AppendEmpty() - doubleSumMetric.SetDataType(pdata.MetricDataTypeSum) + 
doubleSumMetric.SetDataType(pmetric.MetricDataTypeSum) doubleSumMetric.SetName("double_sum") doubleSum := doubleSumMetric.Sum() doubleSumDataPoints := doubleSum.DataPoints() doubleSumDataPoint := doubleSumDataPoints.AppendEmpty() doubleSumDataPoint.Attributes().InsertString("innerLabel", "innerValue") doubleSumDataPoint.SetDoubleVal(10.1) - doubleSumDataPoint.SetTimestamp(pdata.Timestamp(100_000_000)) + doubleSumDataPoint.SetTimestamp(pcommon.Timestamp(100_000_000)) doubleHistogramMetric := metrics.AppendEmpty() - doubleHistogramMetric.SetDataType(pdata.MetricDataTypeHistogram) + doubleHistogramMetric.SetDataType(pmetric.MetricDataTypeHistogram) doubleHistogramMetric.SetName("double_$histogram") doubleHistogram := doubleHistogramMetric.Histogram() doubleHistogramDataPoints := doubleHistogram.DataPoints() @@ -95,19 +96,19 @@ func TestMetricDataToLogService(t *testing.T) { doubleHistogramDataPoint.Attributes().InsertString("innerLabel", "innerValue") doubleHistogramDataPoint.SetCount(2) doubleHistogramDataPoint.SetSum(10.1) - doubleHistogramDataPoint.SetTimestamp(pdata.Timestamp(100_000_000)) + doubleHistogramDataPoint.SetTimestamp(pcommon.Timestamp(100_000_000)) doubleHistogramDataPoint.SetBucketCounts([]uint64{1, 2, 3}) doubleHistogramDataPoint.SetExplicitBounds([]float64{1, 2}) doubleSummaryMetric := metrics.AppendEmpty() - doubleSummaryMetric.SetDataType(pdata.MetricDataTypeSummary) + doubleSummaryMetric.SetDataType(pmetric.MetricDataTypeSummary) doubleSummaryMetric.SetName("double-summary") doubleSummary := doubleSummaryMetric.Summary() doubleSummaryDataPoints := doubleSummary.DataPoints() doubleSummaryDataPoint := doubleSummaryDataPoints.AppendEmpty() doubleSummaryDataPoint.SetCount(2) doubleSummaryDataPoint.SetSum(10.1) - doubleSummaryDataPoint.SetTimestamp(pdata.Timestamp(100_000_000)) + doubleSummaryDataPoint.SetTimestamp(pcommon.Timestamp(100_000_000)) doubleSummaryDataPoint.Attributes().InsertString("innerLabel", "innerValue") quantileVal := doubleSummaryDataPoint.QuantileValues().AppendEmpty() quantileVal.SetValue(10.2) diff --git a/exporter/alibabacloudlogserviceexporter/trace_exporter.go b/exporter/alibabacloudlogserviceexporter/trace_exporter.go index 03f7ff01a586..60dfbf993bf3 100644 --- a/exporter/alibabacloudlogserviceexporter/trace_exporter.go +++ b/exporter/alibabacloudlogserviceexporter/trace_exporter.go @@ -20,7 +20,7 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/exporter/exporterhelper" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap" ) @@ -49,7 +49,7 @@ type logServiceTraceSender struct { func (s *logServiceTraceSender) pushTraceData( _ context.Context, - td pdata.Traces, + td ptrace.Traces, ) error { var err error slsLogs := traceDataToLogServiceData(td) diff --git a/exporter/alibabacloudlogserviceexporter/trace_exporter_test.go b/exporter/alibabacloudlogserviceexporter/trace_exporter_test.go index b5c4b91c32bb..1f0eb89d49bb 100644 --- a/exporter/alibabacloudlogserviceexporter/trace_exporter_test.go +++ b/exporter/alibabacloudlogserviceexporter/trace_exporter_test.go @@ -22,7 +22,7 @@ import ( "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/config" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/ptrace" ) func TestNewTracesExporter(t *testing.T) { @@ -36,7 +36,7 @@ func TestNewTracesExporter(t *testing.T) { 
assert.NoError(t, err) require.NotNil(t, got) - traces := pdata.NewTraces() + traces := ptrace.NewTraces() rs := traces.ResourceSpans().AppendEmpty() ss := rs.ScopeSpans().AppendEmpty() ss.Spans().AppendEmpty() diff --git a/exporter/alibabacloudlogserviceexporter/tracedata_to_logservice.go b/exporter/alibabacloudlogserviceexporter/tracedata_to_logservice.go index 4bb4f405fff3..4251da0d17c6 100644 --- a/exporter/alibabacloudlogserviceexporter/tracedata_to_logservice.go +++ b/exporter/alibabacloudlogserviceexporter/tracedata_to_logservice.go @@ -21,7 +21,7 @@ import ( sls "github.com/aliyun/aliyun-log-go-sdk" "github.com/gogo/protobuf/proto" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/ptrace" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/tracetranslator" ) @@ -45,7 +45,7 @@ const ( ) // traceDataToLogService translates trace data into the LogService format. -func traceDataToLogServiceData(td pdata.Traces) []*sls.Log { +func traceDataToLogServiceData(td ptrace.Traces) []*sls.Log { var slsLogs []*sls.Log resourceSpansSlice := td.ResourceSpans() for i := 0; i < resourceSpansSlice.Len(); i++ { @@ -55,7 +55,7 @@ func traceDataToLogServiceData(td pdata.Traces) []*sls.Log { return slsLogs } -func resourceSpansToLogServiceData(resourceSpans pdata.ResourceSpans) []*sls.Log { +func resourceSpansToLogServiceData(resourceSpans ptrace.ResourceSpans) []*sls.Log { resourceContents := resourceToLogContents(resourceSpans.Resource()) scopeSpansSlice := resourceSpans.ScopeSpans() var slsLogs []*sls.Log @@ -72,7 +72,7 @@ func resourceSpansToLogServiceData(resourceSpans pdata.ResourceSpans) []*sls.Log return slsLogs } -func spanToLogServiceData(span pdata.Span, resourceContents, instrumentationLibraryContents []*sls.LogContent) *sls.Log { +func spanToLogServiceData(span ptrace.Span, resourceContents, instrumentationLibraryContents []*sls.LogContent) *sls.Log { timeNano := int64(span.EndTimestamp()) if timeNano == 0 { timeNano = time.Now().UnixNano() @@ -158,35 +158,35 @@ func spanToLogServiceData(span pdata.Span, resourceContents, instrumentationLibr return &slsLog } -func spanKindToShortString(kind pdata.SpanKind) string { +func spanKindToShortString(kind ptrace.SpanKind) string { switch kind { - case pdata.SpanKindInternal: + case ptrace.SpanKindInternal: return string(tracetranslator.OpenTracingSpanKindInternal) - case pdata.SpanKindClient: + case ptrace.SpanKindClient: return string(tracetranslator.OpenTracingSpanKindClient) - case pdata.SpanKindServer: + case ptrace.SpanKindServer: return string(tracetranslator.OpenTracingSpanKindServer) - case pdata.SpanKindProducer: + case ptrace.SpanKindProducer: return string(tracetranslator.OpenTracingSpanKindProducer) - case pdata.SpanKindConsumer: + case ptrace.SpanKindConsumer: return string(tracetranslator.OpenTracingSpanKindConsumer) default: return string(tracetranslator.OpenTracingSpanKindUnspecified) } } -func statusCodeToShortString(code pdata.StatusCode) string { +func statusCodeToShortString(code ptrace.StatusCode) string { switch code { - case pdata.StatusCodeError: + case ptrace.StatusCodeError: return "ERROR" - case pdata.StatusCodeOk: + case ptrace.StatusCodeOk: return "OK" default: return "UNSET" } } -func eventsToString(events pdata.SpanEventSlice) string { +func eventsToString(events ptrace.SpanEventSlice) string { eventArray := make([]map[string]interface{}, 0, events.Len()) for i := 0; i < events.Len(); i++ { spanEvent := events.At(i) @@ -201,7 +201,7 
@@ func eventsToString(events pdata.SpanEventSlice) string { } -func spanLinksToString(spanLinkSlice pdata.SpanLinkSlice) string { +func spanLinksToString(spanLinkSlice ptrace.SpanLinkSlice) string { linkArray := make([]map[string]interface{}, 0, spanLinkSlice.Len()) for i := 0; i < spanLinkSlice.Len(); i++ { spanLink := spanLinkSlice.At(i) diff --git a/exporter/alibabacloudlogserviceexporter/tracedata_to_logservice_test.go b/exporter/alibabacloudlogserviceexporter/tracedata_to_logservice_test.go index d9370ec2ce20..24483108e55d 100644 --- a/exporter/alibabacloudlogserviceexporter/tracedata_to_logservice_test.go +++ b/exporter/alibabacloudlogserviceexporter/tracedata_to_logservice_test.go @@ -24,8 +24,9 @@ import ( "time" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" ) type logKeyValuePair struct { @@ -81,8 +82,8 @@ func loadFromJSON(file string, obj interface{}) error { return err } -func constructSpanData() pdata.Traces { - traces := pdata.NewTraces() +func constructSpanData() ptrace.Traces { + traces := ptrace.NewTraces() traces.ResourceSpans().EnsureCapacity(1) rspans := traces.ResourceSpans().AppendEmpty() fillResource(rspans.Resource()) @@ -96,7 +97,7 @@ func constructSpanData() pdata.Traces { return traces } -func fillResource(resource pdata.Resource) { +func fillResource(resource pcommon.Resource) { attrs := resource.Attributes() attrs.InsertString(conventions.AttributeServiceName, "signup_aggregator") attrs.InsertString(conventions.AttributeHostName, "xxx.et15") @@ -109,7 +110,7 @@ func fillResource(resource pdata.Resource) { attrs.InsertString(conventions.AttributeCloudAvailabilityZone, "us-west-1b") } -func fillHTTPClientSpan(span pdata.Span) { +func fillHTTPClientSpan(span ptrace.Span) { attributes := make(map[string]interface{}) attributes[conventions.AttributeHTTPMethod] = "GET" attributes[conventions.AttributeHTTPURL] = "https://api.example.com/users/junit" @@ -122,9 +123,9 @@ func fillHTTPClientSpan(span pdata.Span) { span.SetSpanID(newSegmentID()) span.SetParentSpanID(newSegmentID()) span.SetName("/users/junit") - span.SetKind(pdata.SpanKindClient) - span.SetStartTimestamp(pdata.NewTimestampFromTime(startTime)) - span.SetEndTimestamp(pdata.NewTimestampFromTime(endTime)) + span.SetKind(ptrace.SpanKindClient) + span.SetStartTimestamp(pcommon.NewTimestampFromTime(startTime)) + span.SetEndTimestamp(pcommon.NewTimestampFromTime(endTime)) span.SetTraceState("x:y") event := span.Events().AppendEmpty() @@ -141,7 +142,7 @@ func fillHTTPClientSpan(span pdata.Span) { status.SetMessage("OK") } -func fillHTTPServerSpan(span pdata.Span) { +func fillHTTPServerSpan(span ptrace.Span) { attributes := make(map[string]interface{}) attributes[conventions.AttributeHTTPMethod] = "GET" attributes[conventions.AttributeHTTPURL] = "https://api.example.com/users/junit" @@ -155,17 +156,17 @@ func fillHTTPServerSpan(span pdata.Span) { span.SetSpanID(newSegmentID()) span.SetParentSpanID(newSegmentID()) span.SetName("/users/junit") - span.SetKind(pdata.SpanKindServer) - span.SetStartTimestamp(pdata.NewTimestampFromTime(startTime)) - span.SetEndTimestamp(pdata.NewTimestampFromTime(endTime)) + span.SetKind(ptrace.SpanKindServer) + span.SetStartTimestamp(pcommon.NewTimestampFromTime(startTime)) + span.SetEndTimestamp(pcommon.NewTimestampFromTime(endTime)) status := span.Status() status.SetCode(2) 
status.SetMessage("something error") } -func constructSpanAttributes(attributes map[string]interface{}) pdata.Map { - attrs := pdata.NewMap() +func constructSpanAttributes(attributes map[string]interface{}) pcommon.Map { + attrs := pcommon.NewMap() for key, value := range attributes { if cast, ok := value.(int); ok { attrs.InsertInt(key, int64(cast)) @@ -178,27 +179,27 @@ func constructSpanAttributes(attributes map[string]interface{}) pdata.Map { return attrs } -func newTraceID() pdata.TraceID { +func newTraceID() pcommon.TraceID { r := [16]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x96, 0x9A, 0x89, 0x55, 0x57, 0x1A, 0x3F} - return pdata.NewTraceID(r) + return pcommon.NewTraceID(r) } -func newSegmentID() pdata.SpanID { +func newSegmentID() pcommon.SpanID { r := [8]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x64, 0x7D, 0x98} - return pdata.NewSpanID(r) + return pcommon.NewSpanID(r) } func TestSpanKindToShortString(t *testing.T) { - assert.Equal(t, spanKindToShortString(pdata.SpanKindConsumer), "consumer") - assert.Equal(t, spanKindToShortString(pdata.SpanKindProducer), "producer") - assert.Equal(t, spanKindToShortString(pdata.SpanKindClient), "client") - assert.Equal(t, spanKindToShortString(pdata.SpanKindServer), "server") - assert.Equal(t, spanKindToShortString(pdata.SpanKindInternal), "internal") - assert.Equal(t, spanKindToShortString(pdata.SpanKindUnspecified), "") + assert.Equal(t, spanKindToShortString(ptrace.SpanKindConsumer), "consumer") + assert.Equal(t, spanKindToShortString(ptrace.SpanKindProducer), "producer") + assert.Equal(t, spanKindToShortString(ptrace.SpanKindClient), "client") + assert.Equal(t, spanKindToShortString(ptrace.SpanKindServer), "server") + assert.Equal(t, spanKindToShortString(ptrace.SpanKindInternal), "internal") + assert.Equal(t, spanKindToShortString(ptrace.SpanKindUnspecified), "") } func TestStatusCodeToShortString(t *testing.T) { - assert.Equal(t, statusCodeToShortString(pdata.StatusCodeOk), "OK") - assert.Equal(t, statusCodeToShortString(pdata.StatusCodeError), "ERROR") - assert.Equal(t, statusCodeToShortString(pdata.StatusCodeUnset), "UNSET") + assert.Equal(t, statusCodeToShortString(ptrace.StatusCodeOk), "OK") + assert.Equal(t, statusCodeToShortString(ptrace.StatusCodeError), "ERROR") + assert.Equal(t, statusCodeToShortString(ptrace.StatusCodeUnset), "UNSET") } diff --git a/exporter/awscloudwatchlogsexporter/exporter.go b/exporter/awscloudwatchlogsexporter/exporter.go index 86060ea9c79d..0184558ca96f 100644 --- a/exporter/awscloudwatchlogsexporter/exporter.go +++ b/exporter/awscloudwatchlogsexporter/exporter.go @@ -27,7 +27,8 @@ import ( "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/exporter/exporterhelper" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/awsutil" @@ -95,7 +96,7 @@ func newCwLogsExporter(config config.Exporter, params component.ExporterCreateSe } -func (e *exporter) ConsumeLogs(ctx context.Context, ld pdata.Logs) error { +func (e *exporter) ConsumeLogs(ctx context.Context, ld plog.Logs) error { cwLogsPusher := e.pusher logEvents, _ := logsToCWLogs(e.logger, ld) if len(logEvents) == 0 { @@ -137,7 +138,7 @@ func (e *exporter) Start(ctx context.Context, host component.Host) error { return nil } -func logsToCWLogs(logger *zap.Logger, ld pdata.Logs) ([]*cloudwatchlogs.InputLogEvent, 
int) { +func logsToCWLogs(logger *zap.Logger, ld plog.Logs) ([]*cloudwatchlogs.InputLogEvent, int) { n := ld.ResourceLogs().Len() if n == 0 { return []*cloudwatchlogs.InputLogEvent{}, 0 @@ -182,7 +183,7 @@ type cwLogBody struct { Resource map[string]interface{} `json:"resource,omitempty"` } -func logToCWLog(resourceAttrs map[string]interface{}, log pdata.LogRecord) (*cloudwatchlogs.InputLogEvent, error) { +func logToCWLog(resourceAttrs map[string]interface{}, log plog.LogRecord) (*cloudwatchlogs.InputLogEvent, error) { // TODO(jbd): Benchmark and improve the allocations. // Evaluate go.elastic.co/fastjson as a replacement for encoding/json. body := cwLogBody{ @@ -211,43 +212,43 @@ func logToCWLog(resourceAttrs map[string]interface{}, log pdata.LogRecord) (*clo }, nil } -func attrsValue(attrs pdata.Map) map[string]interface{} { +func attrsValue(attrs pcommon.Map) map[string]interface{} { if attrs.Len() == 0 { return nil } out := make(map[string]interface{}, attrs.Len()) - attrs.Range(func(k string, v pdata.Value) bool { + attrs.Range(func(k string, v pcommon.Value) bool { out[k] = attrValue(v) return true }) return out } -func attrValue(value pdata.Value) interface{} { +func attrValue(value pcommon.Value) interface{} { switch value.Type() { - case pdata.ValueTypeInt: + case pcommon.ValueTypeInt: return value.IntVal() - case pdata.ValueTypeBool: + case pcommon.ValueTypeBool: return value.BoolVal() - case pdata.ValueTypeDouble: + case pcommon.ValueTypeDouble: return value.DoubleVal() - case pdata.ValueTypeString: + case pcommon.ValueTypeString: return value.StringVal() - case pdata.ValueTypeMap: + case pcommon.ValueTypeMap: values := map[string]interface{}{} - value.MapVal().Range(func(k string, v pdata.Value) bool { + value.MapVal().Range(func(k string, v pcommon.Value) bool { values[k] = attrValue(v) return true }) return values - case pdata.ValueTypeSlice: + case pcommon.ValueTypeSlice: arrayVal := value.SliceVal() values := make([]interface{}, arrayVal.Len()) for i := 0; i < arrayVal.Len(); i++ { values[i] = attrValue(arrayVal.At(i)) } return values - case pdata.ValueTypeEmpty: + case pcommon.ValueTypeEmpty: return nil default: return nil diff --git a/exporter/awscloudwatchlogsexporter/exporter_test.go b/exporter/awscloudwatchlogsexporter/exporter_test.go index f0718cafca33..5bee5416a4e0 100644 --- a/exporter/awscloudwatchlogsexporter/exporter_test.go +++ b/exporter/awscloudwatchlogsexporter/exporter_test.go @@ -25,7 +25,8 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component/componenttest" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/cwlogs" ) @@ -55,8 +56,8 @@ func (p *mockPusher) ForceFlush() error { func TestLogToCWLog(t *testing.T) { tests := []struct { name string - resource pdata.Resource - log pdata.LogRecord + resource pcommon.Resource + log plog.LogRecord want *cloudwatchlogs.InputLogEvent wantErr bool }{ @@ -71,7 +72,7 @@ func TestLogToCWLog(t *testing.T) { }, { name: "no resource", - resource: pdata.NewResource(), + resource: pcommon.NewResource(), log: testLogRecord(), want: &cloudwatchlogs.InputLogEvent{ Timestamp: aws.Int64(1609719139), @@ -112,30 +113,30 @@ func BenchmarkLogToCWLog(b *testing.B) { } } -func testResource() pdata.Resource { - resource := pdata.NewResource() +func testResource() pcommon.Resource { + 
resource := pcommon.NewResource() resource.Attributes().InsertString("host", "abc123") resource.Attributes().InsertInt("node", 5) return resource } -func testLogRecord() pdata.LogRecord { - record := pdata.NewLogRecord() +func testLogRecord() plog.LogRecord { + record := plog.NewLogRecord() record.SetSeverityNumber(5) record.SetSeverityText("debug") record.SetDroppedAttributesCount(4) record.Body().SetStringVal("hello world") record.Attributes().InsertInt("key1", 1) record.Attributes().InsertString("key2", "attr2") - record.SetTraceID(pdata.NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16})) - record.SetSpanID(pdata.NewSpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8})) + record.SetTraceID(pcommon.NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16})) + record.SetSpanID(pcommon.NewSpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8})) record.SetFlags(255) record.SetTimestamp(1609719139000000) return record } -func testLogRecordWithoutTrace() pdata.LogRecord { - record := pdata.NewLogRecord() +func testLogRecordWithoutTrace() plog.LogRecord { + record := plog.NewLogRecord() record.SetSeverityNumber(5) record.SetSeverityText("debug") record.SetDroppedAttributesCount(4) @@ -149,41 +150,41 @@ func testLogRecordWithoutTrace() pdata.LogRecord { func TestAttrValue(t *testing.T) { tests := []struct { name string - builder func() pdata.Value + builder func() pcommon.Value want interface{} }{ { name: "null", - builder: func() pdata.Value { - return pdata.NewValueEmpty() + builder: func() pcommon.Value { + return pcommon.NewValueEmpty() }, want: nil, }, { name: "bool", - builder: func() pdata.Value { - return pdata.NewValueBool(true) + builder: func() pcommon.Value { + return pcommon.NewValueBool(true) }, want: true, }, { name: "int", - builder: func() pdata.Value { - return pdata.NewValueInt(5) + builder: func() pcommon.Value { + return pcommon.NewValueInt(5) }, want: int64(5), }, { name: "double", - builder: func() pdata.Value { - return pdata.NewValueDouble(6.7) + builder: func() pcommon.Value { + return pcommon.NewValueDouble(6.7) }, want: float64(6.7), }, { name: "map", - builder: func() pdata.Value { - mAttr := pdata.NewValueMap() + builder: func() pcommon.Value { + mAttr := pcommon.NewValueMap() m := mAttr.MapVal() m.InsertString("key1", "value1") m.InsertNull("key2") @@ -202,15 +203,15 @@ func TestAttrValue(t *testing.T) { }, { name: "array", - builder: func() pdata.Value { - arrAttr := pdata.NewValueSlice() + builder: func() pcommon.Value { + arrAttr := pcommon.NewValueSlice() arr := arrAttr.SliceVal() - for _, av := range []pdata.Value{ - pdata.NewValueDouble(1.2), - pdata.NewValueDouble(1.6), - pdata.NewValueBool(true), - pdata.NewValueString("hello"), - pdata.NewValueEmpty(), + for _, av := range []pcommon.Value{ + pcommon.NewValueDouble(1.2), + pcommon.NewValueDouble(1.6), + pcommon.NewValueBool(true), + pcommon.NewValueString("hello"), + pcommon.NewValueEmpty(), } { tgt := arr.AppendEmpty() av.CopyTo(tgt) @@ -240,7 +241,7 @@ func TestConsumeLogs(t *testing.T) { exp, err := newCwLogsPusher(expCfg, componenttest.NewNopExporterCreateSettings()) assert.Nil(t, err) assert.NotNil(t, exp) - ld := pdata.NewLogs() + ld := plog.NewLogs() r := ld.ResourceLogs().AppendEmpty() r.Resource().Attributes().UpsertString("hello", "test") logRecords := r.ScopeLogs().AppendEmpty().LogRecords() diff --git a/exporter/awscloudwatchlogsexporter/go.mod b/exporter/awscloudwatchlogsexporter/go.mod index 15ade8d63ccb..72b1efdc294d 100644 --- a/exporter/awscloudwatchlogsexporter/go.mod +++ 
b/exporter/awscloudwatchlogsexporter/go.mod @@ -8,24 +8,22 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/awsutil v0.0.0-00010101000000-000000000000 github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/cwlogs v0.0.0-00010101000000-000000000000 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/zap v1.21.0 ) require ( - github.com/cenkalti/backoff/v4 v4.1.2 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/protobuf v1.5.2 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rogpeppe/go-internal v1.8.1 // indirect - github.com/spf13/cast v1.4.1 // indirect github.com/stretchr/objx v0.1.1 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect @@ -33,12 +31,8 @@ require ( go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect - golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect golang.org/x/text v0.3.7 // indirect - google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect - google.golang.org/grpc v1.45.0 // indirect - google.golang.org/protobuf v1.28.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) @@ -46,3 +40,5 @@ require ( replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/awsutil => ../../internal/aws/awsutil replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/cwlogs => ../../internal/aws/cwlogs + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/exporter/awscloudwatchlogsexporter/go.sum b/exporter/awscloudwatchlogsexporter/go.sum index f45ae143c817..843a60bf44f2 100644 --- a/exporter/awscloudwatchlogsexporter/go.sum +++ b/exporter/awscloudwatchlogsexporter/go.sum @@ -1,8 +1,5 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod 
h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/aws/aws-sdk-go v1.43.37 h1:kyZ7UjaPZaCik+asF33UFOOYSwr9liDRr/UM/vuw8yY= @@ -20,35 +17,23 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= -github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/structs v1.1.0/go.mod 
h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/felixge/httpsnoop v1.0.2 h1:+nS9g82KMXccJ/wp0zyRW9ZBHFETmMGtkk+2CTTrW4o= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= @@ -65,18 +50,14 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -86,15 +67,12 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 
h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -126,8 +104,8 @@ github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqx github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -166,15 +144,11 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rhnvrm/simples3 v0.6.1/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg= github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -182,7 +156,6 @@ github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod 
h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= @@ -192,20 +165,19 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -229,20 +201,18 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net 
v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -258,7 +228,6 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -266,14 +235,12 @@ golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod 
h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -296,22 +263,16 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -321,19 +282,13 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod 
h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= diff --git a/exporter/awsemfexporter/datapoint.go b/exporter/awsemfexporter/datapoint.go index 5a6a571b5b3b..8c3c0d075fe6 100644 --- a/exporter/awsemfexporter/datapoint.go +++ b/exporter/awsemfexporter/datapoint.go @@ -17,7 +17,8 @@ package awsemfexporter // import "github.com/open-telemetry/opentelemetry-collec import ( "time" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" aws "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/metrics" @@ -48,9 +49,9 @@ type dataPoint struct { } // dataPoints is a wrapper interface for: -// - pdata.NumberDataPointSlice -// - pdata.histogramDataPointSlice -// - pdata.summaryDataPointSlice +// - pmetric.NumberDataPointSlice +// - pmetric.HistogramDataPointSlice +// - pmetric.SummaryDataPointSlice type dataPoints interface { Len() int // At gets the adjusted datapoint from the DataPointSlice at i-th index. 
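For reference, here is a minimal sketch of the post-migration API surface exercised by the datapoint.go hunks above and below: metric-specific types (Metric, MetricDataTypeGauge, NumberDataPoint) now come from pmetric, while shared attribute/value types (Map, Value, Timestamp) come from pcommon. This sketch is illustrative only and is not part of the patch; the metric name and label values are made up, and it only uses constructors that already appear elsewhere in this diff.

package example

import (
	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"
)

// buildGauge constructs a gauge metric with the split pdata packages that
// replace the former go.opentelemetry.io/collector/model/pdata import.
func buildGauge() pmetric.Metric {
	m := pmetric.NewMetric()
	m.SetName("example.gauge") // hypothetical metric name
	m.SetDataType(pmetric.MetricDataTypeGauge)

	dp := m.Gauge().DataPoints().AppendEmpty()
	dp.SetDoubleVal(0.1)
	// Data point attributes are now a pcommon.Map rather than a pdata.Map.
	pcommon.NewMapFromRaw(map[string]interface{}{"label1": "value1"}).CopyTo(dp.Attributes())
	return m
}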
@@ -82,24 +83,24 @@ func mergeLabels(m deltaMetricMetadata, labels map[string]string) map[string]str return result } -// numberDataPointSlice is a wrapper for pdata.NumberDataPointSlice +// numberDataPointSlice is a wrapper for pmetric.NumberDataPointSlice type numberDataPointSlice struct { instrumentationLibraryName string deltaMetricMetadata - pdata.NumberDataPointSlice + pmetric.NumberDataPointSlice } -// histogramDataPointSlice is a wrapper for pdata.histogramDataPointSlice +// histogramDataPointSlice is a wrapper for pmetric.HistogramDataPointSlice type histogramDataPointSlice struct { instrumentationLibraryName string - pdata.HistogramDataPointSlice + pmetric.HistogramDataPointSlice } -// summaryDataPointSlice is a wrapper for pdata.summaryDataPointSlice +// summaryDataPointSlice is a wrapper for pmetric.SummaryDataPointSlice type summaryDataPointSlice struct { instrumentationLibraryName string deltaMetricMetadata - pdata.SummaryDataPointSlice + pmetric.SummaryDataPointSlice } type summaryMetricEntry struct { @@ -115,9 +116,9 @@ func (dps numberDataPointSlice) At(i int) (dataPoint, bool) { var metricVal float64 switch metric.ValueType() { - case pdata.MetricValueTypeDouble: + case pmetric.MetricValueTypeDouble: metricVal = metric.DoubleVal() - case pdata.MetricValueTypeInt: + case pmetric.MetricValueTypeInt: metricVal = float64(metric.IntVal()) } @@ -198,9 +199,9 @@ func (dps summaryDataPointSlice) At(i int) (dataPoint, bool) { // createLabels converts OTel AttributesMap attributes to a map // and optionally adds in the OTel instrumentation library name -func createLabels(attributes pdata.Map, instrLibName string) map[string]string { +func createLabels(attributes pcommon.Map, instrLibName string) map[string]string { labels := make(map[string]string, attributes.Len()+1) - attributes.Range(func(k string, v pdata.Value) bool { + attributes.Range(func(k string, v pcommon.Value) bool { labels[k] = v.AsString() return true }) @@ -214,7 +215,7 @@ func createLabels(attributes pdata.Map, instrLibName string) map[string]string { } // getDataPoints retrieves data points from OT Metric. -func getDataPoints(pmd *pdata.Metric, metadata cWMetricMetadata, logger *zap.Logger) (dps dataPoints) { +func getDataPoints(pmd *pmetric.Metric, metadata cWMetricMetadata, logger *zap.Logger) (dps dataPoints) { if pmd == nil { return } @@ -229,28 +230,28 @@ func getDataPoints(pmd *pdata.Metric, metadata cWMetricMetadata, logger *zap.Log } switch pmd.DataType() { - case pdata.MetricDataTypeGauge: + case pmetric.MetricDataTypeGauge: metric := pmd.Gauge() dps = numberDataPointSlice{ metadata.instrumentationLibraryName, adjusterMetadata, metric.DataPoints(), } - case pdata.MetricDataTypeSum: + case pmetric.MetricDataTypeSum: metric := pmd.Sum() - adjusterMetadata.adjustToDelta = metric.AggregationTemporality() == pdata.MetricAggregationTemporalityCumulative + adjusterMetadata.adjustToDelta = metric.AggregationTemporality() == pmetric.MetricAggregationTemporalityCumulative dps = numberDataPointSlice{ metadata.instrumentationLibraryName, adjusterMetadata, metric.DataPoints(), } - case pdata.MetricDataTypeHistogram: + case pmetric.MetricDataTypeHistogram: metric := pmd.Histogram() dps = histogramDataPointSlice{ metadata.instrumentationLibraryName, metric.DataPoints(), } - case pdata.MetricDataTypeSummary: + case pmetric.MetricDataTypeSummary: metric := pmd.Summary() // For summaries coming from the prometheus receiver, the sum and count are cumulative, whereas for summaries // coming from other sources, e.g. 
SDK, the sum and count are delta by being accumulated and reset periodically. diff --git a/exporter/awsemfexporter/datapoint_test.go b/exporter/awsemfexporter/datapoint_test.go index 74689c6c6574..b485188d02f5 100644 --- a/exporter/awsemfexporter/datapoint_test.go +++ b/exporter/awsemfexporter/datapoint_test.go @@ -22,7 +22,8 @@ import ( metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1" "github.com/golang/protobuf/ptypes/wrappers" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" "go.uber.org/zap/zapcore" "go.uber.org/zap/zaptest/observer" @@ -293,10 +294,10 @@ func TestIntDataPointSliceAt(t *testing.T) { for i, tc := range testDeltaCases { t.Run(tc.testName, func(t *testing.T) { - testDPS := pdata.NewNumberDataPointSlice() + testDPS := pmetric.NewNumberDataPointSlice() testDP := testDPS.AppendEmpty() testDP.SetIntVal(tc.value.(int64)) - pdata.NewMapFromRaw(labels).CopyTo(testDP.Attributes()) + pcommon.NewMapFromRaw(labels).CopyTo(testDP.Attributes()) dps := numberDataPointSlice{ instrLibName, @@ -364,10 +365,10 @@ func TestDoubleDataPointSliceAt(t *testing.T) { for i, tc := range testDeltaCases { t.Run(tc.testName, func(t *testing.T) { - testDPS := pdata.NewNumberDataPointSlice() + testDPS := pmetric.NewNumberDataPointSlice() testDP := testDPS.AppendEmpty() testDP.SetDoubleVal(tc.value.(float64)) - pdata.NewMapFromRaw(labels).CopyTo(testDP.Attributes()) + pcommon.NewMapFromRaw(labels).CopyTo(testDP.Attributes()) dps := numberDataPointSlice{ instrLibName, @@ -396,13 +397,13 @@ func TestHistogramDataPointSliceAt(t *testing.T) { instrLibName := "cloudwatch-otel" labels := map[string]interface{}{"label1": "value1"} - testDPS := pdata.NewHistogramDataPointSlice() + testDPS := pmetric.NewHistogramDataPointSlice() testDP := testDPS.AppendEmpty() testDP.SetCount(uint64(17)) testDP.SetSum(17.13) testDP.SetBucketCounts([]uint64{1, 2, 3}) testDP.SetExplicitBounds([]float64{1, 2, 3}) - pdata.NewMapFromRaw(labels).CopyTo(testDP.Attributes()) + pcommon.NewMapFromRaw(labels).CopyTo(testDP.Attributes()) dps := histogramDataPointSlice{ instrLibName, @@ -456,7 +457,7 @@ func TestSummaryDataPointSliceAt(t *testing.T) { for i, tt := range testCases { t.Run(tt.testName, func(t *testing.T) { - testDPS := pdata.NewSummaryDataPointSlice() + testDPS := pmetric.NewSummaryDataPointSlice() testDP := testDPS.AppendEmpty() testDP.SetSum(tt.inputSumCount[0].(float64)) testDP.SetCount(tt.inputSumCount[1].(uint64)) @@ -468,7 +469,7 @@ func TestSummaryDataPointSliceAt(t *testing.T) { testQuantileValue = testDP.QuantileValues().AppendEmpty() testQuantileValue.SetQuantile(100) testQuantileValue.SetValue(float64(5)) - pdata.NewMapFromRaw(labels).CopyTo(testDP.Attributes()) + pcommon.NewMapFromRaw(labels).CopyTo(testDP.Attributes()) dps := summaryDataPointSlice{ instrLibName, @@ -518,7 +519,7 @@ func TestCreateLabels(t *testing.T) { "b": "B", "c": "C", } - labelsMap := pdata.NewMapFromRaw(map[string]interface{}{ + labelsMap := pcommon.NewMapFromRaw(map[string]interface{}{ "a": "A", "b": "B", "c": "C", @@ -573,7 +574,7 @@ func TestGetDataPoints(t *testing.T) { numberDataPointSlice{ metadata.instrumentationLibraryName, dmm, - pdata.NumberDataPointSlice{}, + pmetric.NumberDataPointSlice{}, }, }, { @@ -583,7 +584,7 @@ func TestGetDataPoints(t *testing.T) { numberDataPointSlice{ metadata.instrumentationLibraryName, dmm, - 
pdata.NumberDataPointSlice{}, + pmetric.NumberDataPointSlice{}, }, }, { @@ -593,7 +594,7 @@ func TestGetDataPoints(t *testing.T) { numberDataPointSlice{ metadata.instrumentationLibraryName, cumulativeDmm, - pdata.NumberDataPointSlice{}, + pmetric.NumberDataPointSlice{}, }, }, { @@ -603,7 +604,7 @@ func TestGetDataPoints(t *testing.T) { numberDataPointSlice{ metadata.instrumentationLibraryName, cumulativeDmm, - pdata.NumberDataPointSlice{}, + pmetric.NumberDataPointSlice{}, }, }, { @@ -612,7 +613,7 @@ func TestGetDataPoints(t *testing.T) { generateTestHistogram("foo"), histogramDataPointSlice{ metadata.instrumentationLibraryName, - pdata.HistogramDataPointSlice{}, + pmetric.HistogramDataPointSlice{}, }, }, { @@ -622,7 +623,7 @@ func TestGetDataPoints(t *testing.T) { summaryDataPointSlice{ metadata.instrumentationLibraryName, dmm, - pdata.SummaryDataPointSlice{}, + pmetric.SummaryDataPointSlice{}, }, }, { @@ -632,7 +633,7 @@ func TestGetDataPoints(t *testing.T) { summaryDataPointSlice{ metadata.instrumentationLibraryName, cumulativeDmm, - pdata.SummaryDataPointSlice{}, + pmetric.SummaryDataPointSlice{}, }, }, } @@ -640,13 +641,13 @@ func TestGetDataPoints(t *testing.T) { for _, tc := range testCases { ocMetrics := []*metricspb.Metric{tc.metric} - // Retrieve *pdata.Metric + // Retrieve *pmetric.Metric rm := internaldata.OCToMetrics(nil, nil, ocMetrics).ResourceMetrics().At(0) metric := rm.ScopeMetrics().At(0).Metrics().At(0) logger := zap.NewNop() - expectedAttributes := pdata.NewMapFromRaw(map[string]interface{}{"label1": "value1"}) + expectedAttributes := pcommon.NewMapFromRaw(map[string]interface{}{"label1": "value1"}) t.Run(tc.testName, func(t *testing.T) { setupDataPointCache() @@ -667,9 +668,9 @@ func TestGetDataPoints(t *testing.T) { assert.Equal(t, 1, convertedDPS.Len()) dp := convertedDPS.NumberDataPointSlice.At(0) switch dp.ValueType() { - case pdata.MetricValueTypeDouble: + case pmetric.MetricValueTypeDouble: assert.Equal(t, 0.1, dp.DoubleVal()) - case pdata.MetricValueTypeInt: + case pmetric.MetricValueTypeInt: assert.Equal(t, int64(1), dp.IntVal()) } assert.Equal(t, expectedAttributes, dp.Attributes()) @@ -697,10 +698,10 @@ func TestGetDataPoints(t *testing.T) { } t.Run("Unhandled metric type", func(t *testing.T) { - metric := pdata.NewMetric() + metric := pmetric.NewMetric() metric.SetName("foo") metric.SetUnit("Count") - metric.SetDataType(pdata.MetricDataTypeNone) + metric.SetDataType(pmetric.MetricDataTypeNone) obs, logs := observer.New(zap.WarnLevel) logger := zap.New(obs) @@ -767,7 +768,7 @@ func TestIntDataPointSlice_At(t *testing.T) { type fields struct { instrumentationLibraryName string deltaMetricMetadata deltaMetricMetadata - NumberDataPointSlice pdata.NumberDataPointSlice + NumberDataPointSlice pmetric.NumberDataPointSlice } type args struct { i int diff --git a/exporter/awsemfexporter/emf_exporter.go b/exporter/awsemfexporter/emf_exporter.go index f18b83d0a787..4bb879ce4b24 100644 --- a/exporter/awsemfexporter/emf_exporter.go +++ b/exporter/awsemfexporter/emf_exporter.go @@ -29,7 +29,8 @@ import ( "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumererror" "go.opentelemetry.io/collector/exporter/exporterhelper" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/awsutil" @@ -117,14 +118,14 @@ func newEmfExporter( return 
resourcetotelemetry.WrapMetricsExporter(config.(*Config).ResourceToTelemetrySettings, exporter), nil } -func (emf *emfExporter) pushMetricsData(_ context.Context, md pdata.Metrics) error { +func (emf *emfExporter) pushMetricsData(_ context.Context, md pmetric.Metrics) error { rms := md.ResourceMetrics() labels := map[string]string{} for i := 0; i < rms.Len(); i++ { rm := rms.At(i) am := rm.Resource().Attributes() if am.Len() > 0 { - am.Range(func(k string, v pdata.Value) bool { + am.Range(func(k string, v pcommon.Value) bool { labels[k] = v.StringVal() return true }) @@ -219,7 +220,7 @@ func (emf *emfExporter) listPushers() []cwlogs.Pusher { return pushers } -func (emf *emfExporter) ConsumeMetrics(ctx context.Context, md pdata.Metrics) error { +func (emf *emfExporter) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error { return emf.pushMetricsData(ctx, md) } diff --git a/exporter/awsemfexporter/go.mod b/exporter/awsemfexporter/go.mod index 39d19a3a6d25..8356359334f6 100644 --- a/exporter/awsemfexporter/go.mod +++ b/exporter/awsemfexporter/go.mod @@ -13,26 +13,26 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.48.0 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus v0.48.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/zap v1.21.0 google.golang.org/protobuf v1.28.0 ) require ( - github.com/cenkalti/backoff/v4 v4.1.2 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.48.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spf13/cast v1.4.1 // indirect github.com/stretchr/objx v0.1.1 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect @@ -40,8 +40,8 @@ require ( go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect - golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect + golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 // indirect golang.org/x/text v0.3.7 // indirect google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect google.golang.org/grpc v1.45.0 // indirect @@ -60,3 +60,5 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/corei replace 
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry => ../../pkg/resourcetotelemetry replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus => ../../pkg/translator/opencensus + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/exporter/awsemfexporter/go.sum b/exporter/awsemfexporter/go.sum index d2cd09347abb..ce7a92a3c415 100644 --- a/exporter/awsemfexporter/go.sum +++ b/exporter/awsemfexporter/go.sum @@ -20,8 +20,8 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= -github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.3.0 h1:t/LhUZLVitR1Ow2YOnduCsavhwFUklBMoGVYUCqmCqk= github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -96,7 +96,6 @@ github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -130,8 +129,8 @@ github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqx github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -175,8 +174,6 @@ 
github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -194,17 +191,19 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= @@ -241,8 +240,9 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod 
h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -268,8 +268,8 @@ golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/exporter/awsemfexporter/grouped_metric.go b/exporter/awsemfexporter/grouped_metric.go index 2dee0bdfbabf..775739595c74 100644 --- a/exporter/awsemfexporter/grouped_metric.go +++ b/exporter/awsemfexporter/grouped_metric.go @@ -18,7 +18,7 @@ import ( "encoding/json" "strings" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" aws "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/metrics" @@ -38,7 +38,7 @@ type metricInfo struct { } // addToGroupedMetric processes OT metrics and adds them into GroupedMetric buckets -func addToGroupedMetric(pmd *pdata.Metric, groupedMetrics map[interface{}]*groupedMetric, metadata cWMetricMetadata, patternReplaceSucceeded bool, logger *zap.Logger, descriptor map[string]MetricDescriptor, config *Config) error { +func addToGroupedMetric(pmd *pmetric.Metric, groupedMetrics map[interface{}]*groupedMetric, metadata cWMetricMetadata, patternReplaceSucceeded bool, logger *zap.Logger, descriptor map[string]MetricDescriptor, config *Config) error { if pmd == nil { return nil } @@ -186,7 +186,7 @@ func groupedMetricKey(metadata groupedMetricMetadata, labels map[string]string) return aws.NewKey(metadata, labels) } -func translateUnit(metric *pdata.Metric, descriptor map[string]MetricDescriptor) string { +func translateUnit(metric *pmetric.Metric, descriptor 
map[string]MetricDescriptor) string { unit := metric.Unit() if descriptor, exists := descriptor[metric.Name()]; exists { if unit == "" || descriptor.overwrite { diff --git a/exporter/awsemfexporter/grouped_metric_test.go b/exporter/awsemfexporter/grouped_metric_test.go index 3e628f084ee9..bde78f3b3a2a 100644 --- a/exporter/awsemfexporter/grouped_metric_test.go +++ b/exporter/awsemfexporter/grouped_metric_test.go @@ -24,8 +24,8 @@ import ( metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1" resourcepb "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" "go.uber.org/zap/zapcore" "go.uber.org/zap/zaptest/observer" @@ -144,7 +144,7 @@ func TestAddToGroupedMetric(t *testing.T) { }, Metrics: tc.metric, } - // Retrieve *pdata.Metric + // Retrieve *pmetric.Metric rm := internaldata.OCToMetrics(oc.Node, oc.Resource, oc.Metrics) rms := rm.ResourceMetrics() assert.Equal(t, 1, rms.Len()) @@ -403,12 +403,12 @@ func TestAddToGroupedMetric(t *testing.T) { t.Run("Unhandled metric type", func(t *testing.T) { groupedMetrics := make(map[interface{}]*groupedMetric) - md := pdata.NewMetrics() + md := pmetric.NewMetrics() rms := md.ResourceMetrics() metric := rms.AppendEmpty().ScopeMetrics().AppendEmpty().Metrics().AppendEmpty() metric.SetName("foo") metric.SetUnit("Count") - metric.SetDataType(pdata.MetricDataTypeNone) + metric.SetDataType(pmetric.MetricDataTypeNone) obs, logs := observer.New(zap.WarnLevel) obsLogger := zap.New(obs) @@ -507,7 +507,7 @@ func BenchmarkAddToGroupedMetric(b *testing.B) { } func TestTranslateUnit(t *testing.T) { - metric := pdata.NewMetric() + metric := pmetric.NewMetric() metric.SetName("writeIfNotExist") translator := &metricTranslator{ diff --git a/exporter/awsemfexporter/metric_declaration_test.go b/exporter/awsemfexporter/metric_declaration_test.go index 469dd0f406ff..4312c0871ee3 100644 --- a/exporter/awsemfexporter/metric_declaration_test.go +++ b/exporter/awsemfexporter/metric_declaration_test.go @@ -18,7 +18,7 @@ import ( "testing" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" "go.uber.org/zap/zapcore" "go.uber.org/zap/zaptest/observer" @@ -496,7 +496,7 @@ func TestMetricDeclarationMatchesLabels(t *testing.T) { }, } logger := zap.NewNop() - metric := pdata.NewMetric() + metric := pmetric.NewMetric() metric.SetName("a") for _, tc := range testCases { diff --git a/exporter/awsemfexporter/metric_translator.go b/exporter/awsemfexporter/metric_translator.go index a05975078fc9..d8009e8dbf9c 100644 --- a/exporter/awsemfexporter/metric_translator.go +++ b/exporter/awsemfexporter/metric_translator.go @@ -20,7 +20,7 @@ import ( "reflect" "time" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/cwlogs" @@ -41,12 +41,12 @@ const ( fieldPrometheusMetricType = "prom_metric_type" ) -var fieldPrometheusTypes = map[pdata.MetricDataType]string{ - pdata.MetricDataTypeNone: "", - pdata.MetricDataTypeGauge: "gauge", - pdata.MetricDataTypeSum: "counter", - pdata.MetricDataTypeHistogram: "histogram", - pdata.MetricDataTypeSummary: "summary", +var 
fieldPrometheusTypes = map[pmetric.MetricDataType]string{ + pmetric.MetricDataTypeNone: "", + pmetric.MetricDataTypeGauge: "gauge", + pmetric.MetricDataTypeSum: "counter", + pmetric.MetricDataTypeHistogram: "histogram", + pmetric.MetricDataTypeSummary: "summary", } type cWMetrics struct { @@ -81,7 +81,7 @@ type cWMetricMetadata struct { instrumentationLibraryName string receiver string - metricDataType pdata.MetricDataType + metricDataType pmetric.MetricDataType } type metricTranslator struct { @@ -99,7 +99,7 @@ func newMetricTranslator(config Config) metricTranslator { } // translateOTelToGroupedMetric converts OT metrics to Grouped Metric format. -func (mt metricTranslator) translateOTelToGroupedMetric(rm *pdata.ResourceMetrics, groupedMetrics map[interface{}]*groupedMetric, config *Config) error { +func (mt metricTranslator) translateOTelToGroupedMetric(rm *pmetric.ResourceMetrics, groupedMetrics map[interface{}]*groupedMetric, config *Config) error { timestamp := time.Now().UnixNano() / int64(time.Millisecond) var instrumentationLibName string cWNamespace := getNamespace(rm, config.Namespace) diff --git a/exporter/awsemfexporter/metric_translator_test.go b/exporter/awsemfexporter/metric_translator_test.go index f1ad37fa213f..96b4ae77adbc 100644 --- a/exporter/awsemfexporter/metric_translator_test.go +++ b/exporter/awsemfexporter/metric_translator_test.go @@ -28,8 +28,9 @@ import ( "github.com/golang/protobuf/ptypes/timestamp" "github.com/golang/protobuf/ptypes/wrappers" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" "go.uber.org/zap/zapcore" "go.uber.org/zap/zaptest/observer" @@ -445,7 +446,7 @@ func TestTranslateOtToGroupedMetric(t *testing.T) { testCases := []struct { testName string - metric *pdata.ResourceMetrics + metric *pmetric.ResourceMetrics counterLabels map[string]string timerLabels map[string]string expectedNamespace string @@ -844,7 +845,7 @@ func TestTranslateGroupedMetricToCWMetric(t *testing.T) { timestampMs: timestamp, }, receiver: prometheusReceiver, - metricDataType: pdata.MetricDataTypeGauge, + metricDataType: pmetric.MetricDataTypeGauge, }, }, nil, @@ -2267,7 +2268,7 @@ type testMetric struct { type logGroupStreamTest struct { name string - inputMetrics pdata.Metrics + inputMetrics pmetric.Metrics inLogGroupName string inLogStreamName string outLogGroupName string @@ -2421,23 +2422,23 @@ func TestTranslateOtToGroupedMetricForLogGroupAndStream(t *testing.T) { } } -func generateTestMetrics(tm testMetric) pdata.Metrics { - md := pdata.NewMetrics() +func generateTestMetrics(tm testMetric) pmetric.Metrics { + md := pmetric.NewMetrics() now := time.Now() rm := md.ResourceMetrics().AppendEmpty() - pdata.NewMapFromRaw(tm.resourceAttributeMap).CopyTo(rm.Resource().Attributes()) + pcommon.NewMapFromRaw(tm.resourceAttributeMap).CopyTo(rm.Resource().Attributes()) ms := rm.ScopeMetrics().AppendEmpty().Metrics() for i, name := range tm.metricNames { m := ms.AppendEmpty() m.SetName(name) - m.SetDataType(pdata.MetricDataTypeGauge) + m.SetDataType(pmetric.MetricDataTypeGauge) for _, value := range tm.metricValues[i] { dp := m.Gauge().DataPoints().AppendEmpty() - dp.SetTimestamp(pdata.NewTimestampFromTime(now.Add(10 * time.Second))) + dp.SetTimestamp(pcommon.NewTimestampFromTime(now.Add(10 * time.Second))) dp.SetDoubleVal(value) - 
pdata.NewMapFromRaw(tm.attributeMap).CopyTo(dp.Attributes()) + pcommon.NewMapFromRaw(tm.attributeMap).CopyTo(dp.Attributes()) } } return md diff --git a/exporter/awsemfexporter/util.go b/exporter/awsemfexporter/util.go index 73867d736f52..d830e945a72e 100644 --- a/exporter/awsemfexporter/util.go +++ b/exporter/awsemfexporter/util.go @@ -20,8 +20,9 @@ import ( "strings" "time" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" ) @@ -68,15 +69,15 @@ func replace(s, pattern string, value string, logger *zap.Logger) (string, bool) } // getNamespace retrieves namespace for given set of metrics from user config. -func getNamespace(rm *pdata.ResourceMetrics, namespace string) string { +func getNamespace(rm *pmetric.ResourceMetrics, namespace string) string { if len(namespace) == 0 { serviceName, svcNameOk := rm.Resource().Attributes().Get(conventions.AttributeServiceName) serviceNamespace, svcNsOk := rm.Resource().Attributes().Get(conventions.AttributeServiceNamespace) - if svcNameOk && svcNsOk && serviceName.Type() == pdata.ValueTypeString && serviceNamespace.Type() == pdata.ValueTypeString { + if svcNameOk && svcNsOk && serviceName.Type() == pcommon.ValueTypeString && serviceNamespace.Type() == pcommon.ValueTypeString { namespace = fmt.Sprintf("%s/%s", serviceNamespace.StringVal(), serviceName.StringVal()) - } else if svcNameOk && serviceName.Type() == pdata.ValueTypeString { + } else if svcNameOk && serviceName.Type() == pcommon.ValueTypeString { namespace = serviceName.StringVal() - } else if svcNsOk && serviceNamespace.Type() == pdata.ValueTypeString { + } else if svcNsOk && serviceNamespace.Type() == pcommon.ValueTypeString { namespace = serviceNamespace.StringVal() } } @@ -88,7 +89,7 @@ func getNamespace(rm *pdata.ResourceMetrics, namespace string) string { } // getLogInfo retrieves the log group and log stream names from a given set of metrics. -func getLogInfo(rm *pdata.ResourceMetrics, cWNamespace string, config *Config) (string, string, bool) { +func getLogInfo(rm *pmetric.ResourceMetrics, cWNamespace string, config *Config) (string, string, bool) { var logGroup, logStream string groupReplaced := true streamReplaced := true @@ -163,15 +164,15 @@ func dimensionRollup(dimensionRollupOption string, labels map[string]string) [][ } // unixNanoToMilliseconds converts a timestamp in nanoseconds to milliseconds. 
-func unixNanoToMilliseconds(timestamp pdata.Timestamp) int64 { +func unixNanoToMilliseconds(timestamp pcommon.Timestamp) int64 { return int64(uint64(timestamp) / uint64(time.Millisecond)) } -// attrMaptoStringMap converts a pdata.Map to a map[string]string -func attrMaptoStringMap(attrMap pdata.Map) map[string]string { +// attrMaptoStringMap converts a pcommon.Map to a map[string]string +func attrMaptoStringMap(attrMap pcommon.Map) map[string]string { strMap := make(map[string]string, attrMap.Len()) - attrMap.Range(func(k string, v pdata.Value) bool { + attrMap.Range(func(k string, v pcommon.Value) bool { strMap[k] = v.AsString() return true }) diff --git a/exporter/awsemfexporter/util_test.go b/exporter/awsemfexporter/util_test.go index 96a441434148..a2d2137452c0 100644 --- a/exporter/awsemfexporter/util_test.go +++ b/exporter/awsemfexporter/util_test.go @@ -21,8 +21,9 @@ import ( agentmetricspb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1" resourcepb "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" internaldata "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus" @@ -33,7 +34,7 @@ func TestReplacePatternValidTaskId(t *testing.T) { input := "{TaskId}" - attrMap := pdata.NewMap() + attrMap := pcommon.NewMap() attrMap.UpsertString("aws.ecs.cluster.name", "test-cluster-name") attrMap.UpsertString("aws.ecs.task.id", "test-task-id") @@ -48,7 +49,7 @@ func TestReplacePatternValidClusterName(t *testing.T) { input := "/aws/ecs/containerinsights/{ClusterName}/performance" - attrMap := pdata.NewMap() + attrMap := pcommon.NewMap() attrMap.UpsertString("aws.ecs.cluster.name", "test-cluster-name") attrMap.UpsertString("aws.ecs.task.id", "test-task-id") @@ -63,7 +64,7 @@ func TestReplacePatternMissingAttribute(t *testing.T) { input := "/aws/ecs/containerinsights/{ClusterName}/performance" - attrMap := pdata.NewMap() + attrMap := pcommon.NewMap() attrMap.UpsertString("aws.ecs.task.id", "test-task-id") s, success := replacePatterns(input, attrMaptoStringMap(attrMap), logger) @@ -77,7 +78,7 @@ func TestReplacePatternValidPodName(t *testing.T) { input := "/aws/eks/containerinsights/{PodName}/performance" - attrMap := pdata.NewMap() + attrMap := pcommon.NewMap() attrMap.UpsertString("aws.eks.cluster.name", "test-cluster-name") attrMap.UpsertString("PodName", "test-pod-001") @@ -92,7 +93,7 @@ func TestReplacePatternValidPod(t *testing.T) { input := "/aws/eks/containerinsights/{PodName}/performance" - attrMap := pdata.NewMap() + attrMap := pcommon.NewMap() attrMap.UpsertString("aws.eks.cluster.name", "test-cluster-name") attrMap.UpsertString("pod", "test-pod-001") @@ -107,7 +108,7 @@ func TestReplacePatternMissingPodName(t *testing.T) { input := "/aws/eks/containerinsights/{PodName}/performance" - attrMap := pdata.NewMap() + attrMap := pcommon.NewMap() attrMap.UpsertString("aws.eks.cluster.name", "test-cluster-name") s, success := replacePatterns(input, attrMaptoStringMap(attrMap), logger) @@ -121,7 +122,7 @@ func TestReplacePatternAttrPlaceholderClusterName(t *testing.T) { input := "/aws/ecs/containerinsights/{ClusterName}/performance" - attrMap := pdata.NewMap() + attrMap := pcommon.NewMap() attrMap.UpsertString("ClusterName", 
"test-cluster-name") s, success := replacePatterns(input, attrMaptoStringMap(attrMap), logger) @@ -135,7 +136,7 @@ func TestReplacePatternWrongKey(t *testing.T) { input := "/aws/ecs/containerinsights/{WrongKey}/performance" - attrMap := pdata.NewMap() + attrMap := pcommon.NewMap() attrMap.UpsertString("ClusterName", "test-task-id") s, success := replacePatterns(input, attrMaptoStringMap(attrMap), logger) @@ -149,7 +150,7 @@ func TestReplacePatternNilAttrValue(t *testing.T) { input := "/aws/ecs/containerinsights/{ClusterName}/performance" - attrMap := pdata.NewMap() + attrMap := pcommon.NewMap() attrMap.InsertNull("ClusterName") s, success := replacePatterns(input, attrMaptoStringMap(attrMap), logger) @@ -163,7 +164,7 @@ func TestReplacePatternValidTaskDefinitionFamily(t *testing.T) { input := "{TaskDefinitionFamily}" - attrMap := pdata.NewMap() + attrMap := pcommon.NewMap() attrMap.UpsertString("aws.ecs.cluster.name", "test-cluster-name") attrMap.UpsertString("aws.ecs.task.family", "test-task-definition-family") @@ -263,7 +264,7 @@ func TestGetLogInfo(t *testing.T) { }, } - var rms []pdata.ResourceMetrics + var rms []pmetric.ResourceMetrics for _, md := range metrics { rms = append(rms, internaldata.OCToMetrics(md.Node, md.Resource, md.Metrics).ResourceMetrics().At(0)) } diff --git a/exporter/awskinesisexporter/exporter.go b/exporter/awskinesisexporter/exporter.go index 957904f631ec..4d5f2107841f 100644 --- a/exporter/awskinesisexporter/exporter.go +++ b/exporter/awskinesisexporter/exporter.go @@ -25,7 +25,9 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awskinesisexporter/internal/batch" @@ -115,7 +117,7 @@ func (e Exporter) Shutdown(context.Context) error { } // ConsumeTraces receives a span batch and exports it to AWS Kinesis -func (e Exporter) ConsumeTraces(ctx context.Context, td pdata.Traces) error { +func (e Exporter) ConsumeTraces(ctx context.Context, td ptrace.Traces) error { bt, err := e.batcher.Traces(td) if err != nil { return err @@ -123,7 +125,7 @@ func (e Exporter) ConsumeTraces(ctx context.Context, td pdata.Traces) error { return e.producer.Put(ctx, bt) } -func (e Exporter) ConsumeMetrics(ctx context.Context, md pdata.Metrics) error { +func (e Exporter) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error { bt, err := e.batcher.Metrics(md) if err != nil { return err @@ -131,7 +133,7 @@ func (e Exporter) ConsumeMetrics(ctx context.Context, md pdata.Metrics) error { return e.producer.Put(ctx, bt) } -func (e Exporter) ConsumeLogs(ctx context.Context, ld pdata.Logs) error { +func (e Exporter) ConsumeLogs(ctx context.Context, ld plog.Logs) error { bt, err := e.batcher.Logs(ld) if err != nil { return err diff --git a/exporter/awskinesisexporter/go.mod b/exporter/awskinesisexporter/go.mod index 7a8ce3966238..c4cec86ddd3f 100644 --- a/exporter/awskinesisexporter/go.mod +++ b/exporter/awskinesisexporter/go.mod @@ -7,8 +7,7 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.48.0 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.48.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - 
go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d go.uber.org/zap v1.21.0 ) @@ -16,17 +15,17 @@ require ( github.com/gogo/protobuf v1.3.2 github.com/google/uuid v1.3.0 github.com/jaegertracing/jaeger v1.32.0 + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/multierr v1.8.0 ) require ( github.com/apache/thrift v0.16.0 // indirect github.com/benbjohnson/clock v1.3.0 // indirect - github.com/cenkalti/backoff/v4 v4.1.2 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/golang/protobuf v1.5.2 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect @@ -34,19 +33,14 @@ require ( github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/openzipkin/zipkin-go v0.4.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spf13/cast v1.4.1 // indirect github.com/uber/jaeger-client-go v2.30.0+incompatible // indirect github.com/uber/jaeger-lib v2.4.1+incompatible // indirect go.opencensus.io v0.23.0 // indirect + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect - golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect - golang.org/x/text v0.3.7 // indirect - google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect - google.golang.org/grpc v1.45.0 // indirect google.golang.org/protobuf v1.28.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect @@ -59,3 +53,5 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal => ../../internal/coreinternal replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus => ../../pkg/translator/opencensus + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/exporter/awskinesisexporter/go.sum b/exporter/awskinesisexporter/go.sum index e5991c174722..b4b25f2c91db 100644 --- a/exporter/awskinesisexporter/go.sum +++ b/exporter/awskinesisexporter/go.sum @@ -2,7 +2,6 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/HdrHistogram/hdrhistogram-go v1.0.1 h1:GX8GAYDuhlFQnI2fRDHQhTlkHMz8bEn0jTI6LJU0mpw= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/Shopify/sarama v1.30.0/go.mod h1:zujlQQx1kzHsh4jfV1USnptCQrHAEZ2Hk8fTKCulPVs= 
github.com/Shopify/toxiproxy/v2 v2.1.6-0.20210914104332-15ea381dcdae/go.mod h1:/cvHQkZ1fst0EmZnA5dFtiQdWCNCFYzb+uE2vqVgvx0= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= @@ -26,19 +25,14 @@ github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZx github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= -github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -51,7 +45,6 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.7.0/go.mod 
h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= @@ -110,7 +103,6 @@ github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= @@ -157,8 +149,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= @@ -222,9 +214,6 @@ github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFo github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -250,17 +239,19 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod 
h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= @@ -305,8 +296,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -338,8 +329,7 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys 
v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -373,7 +363,6 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= @@ -383,10 +372,8 @@ google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8 google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/exporter/awskinesisexporter/internal/batch/encode.go b/exporter/awskinesisexporter/internal/batch/encode.go index 6278186d4a8f..f3a2fe25d95c 100644 --- a/exporter/awskinesisexporter/internal/batch/encode.go +++ b/exporter/awskinesisexporter/internal/batch/encode.go @@ -17,8 +17,9 @@ package batch // import "github.com/open-telemetry/opentelemetry-collector-contr import ( "errors" - "go.opentelemetry.io/collector/model/otlp" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awskinesisexporter/internal/key" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin/zipkinv2" @@ -34,11 +35,11 @@ var ( // Encoder transforms the internal pipeline format into a configurable // format that is then used to export to kinesis. 
type Encoder interface { - Metrics(md pdata.Metrics) (*Batch, error) + Metrics(md pmetric.Metrics) (*Batch, error) - Traces(td pdata.Traces) (*Batch, error) + Traces(td ptrace.Traces) (*Batch, error) - Logs(ld pdata.Logs) (*Batch, error) + Logs(ld plog.Logs) (*Batch, error) } func NewEncoder(named string, batchOptions ...Option) (Encoder, error) { @@ -55,16 +56,16 @@ func NewEncoder(named string, batchOptions ...Option) (Encoder, error) { case "zipkin_json": bm.tracesMarshaller = zipkinv2.NewJSONTracesMarshaler() case "otlp", "otlp_proto": - bm.logsMarshaller = otlp.NewProtobufLogsMarshaler() - bm.metricsMarshaller = otlp.NewProtobufMetricsMarshaler() - bm.tracesMarshaller = otlp.NewProtobufTracesMarshaler() + bm.logsMarshaller = plog.NewProtoMarshaler() + bm.metricsMarshaller = pmetric.NewProtoMarshaler() + bm.tracesMarshaller = ptrace.NewProtoMarshaler() case "otlp_json": - bm.logsMarshaller = otlp.NewJSONLogsMarshaler() - bm.metricsMarshaller = otlp.NewJSONMetricsMarshaler() - bm.tracesMarshaller = otlp.NewJSONTracesMarshaler() + bm.logsMarshaller = plog.NewJSONMarshaler() + bm.metricsMarshaller = pmetric.NewJSONMarshaler() + bm.tracesMarshaller = ptrace.NewJSONMarshaler() case "jaeger_proto": // Jaeger encoding is a special case - // since the internal libraries offer no means of pdata.TraceMarshaller. + // since the internal libraries offer no means of ptrace.TraceMarshaller. // In order to preserve historical behavior, a custom type // is used until it can be replaced. return &jaegerEncoder{ diff --git a/exporter/awskinesisexporter/internal/batch/encode_jaeger.go b/exporter/awskinesisexporter/internal/batch/encode_jaeger.go index 0b2ee2eb88a2..ea21a8267257 100644 --- a/exporter/awskinesisexporter/internal/batch/encode_jaeger.go +++ b/exporter/awskinesisexporter/internal/batch/encode_jaeger.go @@ -18,7 +18,9 @@ import ( "github.com/gogo/protobuf/proto" "github.com/jaegertracing/jaeger/model" "go.opentelemetry.io/collector/consumer/consumererror" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/multierr" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awskinesisexporter/internal/key" @@ -38,7 +40,7 @@ type jaegerEncoder struct { var _ Encoder = (*jaegerEncoder)(nil) -func (je jaegerEncoder) Traces(td pdata.Traces) (*Batch, error) { +func (je jaegerEncoder) Traces(td ptrace.Traces) (*Batch, error) { traces, err := jaeger.ProtoFromTraces(td) if err != nil { return nil, consumererror.NewTraces(err, td) @@ -64,5 +66,5 @@ func (je jaegerEncoder) Traces(td pdata.Traces) (*Batch, error) { return bt, errs } -func (jaegerEncoder) Logs(pdata.Logs) (*Batch, error) { return nil, ErrUnsupportedEncoding } -func (jaegerEncoder) Metrics(pdata.Metrics) (*Batch, error) { return nil, ErrUnsupportedEncoding } +func (jaegerEncoder) Logs(plog.Logs) (*Batch, error) { return nil, ErrUnsupportedEncoding } +func (jaegerEncoder) Metrics(pmetric.Metrics) (*Batch, error) { return nil, ErrUnsupportedEncoding } diff --git a/exporter/awskinesisexporter/internal/batch/encode_marshaler.go b/exporter/awskinesisexporter/internal/batch/encode_marshaler.go index bc6daab07cdb..5473610d5ce2 100644 --- a/exporter/awskinesisexporter/internal/batch/encode_marshaler.go +++ b/exporter/awskinesisexporter/internal/batch/encode_marshaler.go @@ -18,7 +18,9 @@ import ( "errors" "go.opentelemetry.io/collector/consumer/consumererror" - 
"go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/multierr" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awskinesisexporter/internal/key" @@ -28,21 +30,21 @@ type batchMarshaller struct { batchOptions []Option partitioner key.Partition - logsMarshaller pdata.LogsMarshaler - tracesMarshaller pdata.TracesMarshaler - metricsMarshaller pdata.MetricsMarshaler + logsMarshaller plog.Marshaler + tracesMarshaller ptrace.Marshaler + metricsMarshaller pmetric.Marshaler } var _ Encoder = (*batchMarshaller)(nil) -func (bm *batchMarshaller) Logs(ld pdata.Logs) (*Batch, error) { +func (bm *batchMarshaller) Logs(ld plog.Logs) (*Batch, error) { bt := New(bm.batchOptions...) // Due to kinesis limitations of only allowing 1Mb of data per record, // the resource data is copied to the export variable then marshaled // due to no current means of marshaling per resource. - export := pdata.NewLogs() + export := plog.NewLogs() export.ResourceLogs().AppendEmpty() var errs error @@ -67,14 +69,14 @@ func (bm *batchMarshaller) Logs(ld pdata.Logs) (*Batch, error) { return bt, errs } -func (bm *batchMarshaller) Traces(td pdata.Traces) (*Batch, error) { +func (bm *batchMarshaller) Traces(td ptrace.Traces) (*Batch, error) { bt := New(bm.batchOptions...) // Due to kinesis limitations of only allowing 1Mb of data per record, // the resource data is copied to the export variable then marshaled // due to no current means of marshaling per resource. - export := pdata.NewTraces() + export := ptrace.NewTraces() export.ResourceSpans().AppendEmpty() var errs error @@ -99,14 +101,14 @@ func (bm *batchMarshaller) Traces(td pdata.Traces) (*Batch, error) { return bt, errs } -func (bm *batchMarshaller) Metrics(md pdata.Metrics) (*Batch, error) { +func (bm *batchMarshaller) Metrics(md pmetric.Metrics) (*Batch, error) { bt := New(bm.batchOptions...) // Due to kinesis limitations of only allowing 1Mb of data per record, // the resource data is copied to the export variable then marshaled // due to no current means of marshaling per resource. 
- export := pdata.NewMetrics() + export := pmetric.NewMetrics() export.ResourceMetrics().AppendEmpty() var errs error diff --git a/exporter/awskinesisexporter/internal/batch/encode_unsupported.go b/exporter/awskinesisexporter/internal/batch/encode_unsupported.go index 1c6446427dbb..811ef7a211bf 100644 --- a/exporter/awskinesisexporter/internal/batch/encode_unsupported.go +++ b/exporter/awskinesisexporter/internal/batch/encode_unsupported.go @@ -14,24 +14,28 @@ package batch // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awskinesisexporter/internal/batch" -import "go.opentelemetry.io/collector/model/pdata" +import ( + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" +) type unsupported struct{} var ( - _ pdata.TracesMarshaler = (*unsupported)(nil) - _ pdata.MetricsMarshaler = (*unsupported)(nil) - _ pdata.LogsMarshaler = (*unsupported)(nil) + _ ptrace.Marshaler = (*unsupported)(nil) + _ pmetric.Marshaler = (*unsupported)(nil) + _ plog.Marshaler = (*unsupported)(nil) ) -func (unsupported) MarshalTraces(_ pdata.Traces) ([]byte, error) { +func (unsupported) MarshalTraces(_ ptrace.Traces) ([]byte, error) { return nil, ErrUnsupportedEncoding } -func (unsupported) MarshalMetrics(_ pdata.Metrics) ([]byte, error) { +func (unsupported) MarshalMetrics(_ pmetric.Metrics) ([]byte, error) { return nil, ErrUnsupportedEncoding } -func (unsupported) MarshalLogs(_ pdata.Logs) ([]byte, error) { +func (unsupported) MarshalLogs(_ plog.Logs) ([]byte, error) { return nil, ErrUnsupportedEncoding } diff --git a/exporter/awskinesisexporter/internal/batch/encoder_test.go b/exporter/awskinesisexporter/internal/batch/encoder_test.go index 89ed2255950a..1e503de91599 100644 --- a/exporter/awskinesisexporter/internal/batch/encoder_test.go +++ b/exporter/awskinesisexporter/internal/batch/encoder_test.go @@ -15,40 +15,43 @@ package batch_test import ( - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" ) -func NewTestTraces(spanCount int) pdata.Traces { - traces := pdata.NewTraces() +func NewTestTraces(spanCount int) ptrace.Traces { + traces := ptrace.NewTraces() for i := 0; i < spanCount; i++ { span := traces.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty().Spans().AppendEmpty() span.SetName("foo") - span.SetStartTimestamp(pdata.Timestamp(10)) - span.SetEndTimestamp(pdata.Timestamp(20)) - span.SetTraceID(pdata.NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16})) - span.SetSpanID(pdata.NewSpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8})) + span.SetStartTimestamp(pcommon.Timestamp(10)) + span.SetEndTimestamp(pcommon.Timestamp(20)) + span.SetTraceID(pcommon.NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16})) + span.SetSpanID(pcommon.NewSpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8})) } return traces } -func NewTestMetrics(metricCount int) pdata.Metrics { - metrics := pdata.NewMetrics() +func NewTestMetrics(metricCount int) pmetric.Metrics { + metrics := pmetric.NewMetrics() for i := 0; i < metricCount; i++ { metric := metrics.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics().AppendEmpty() metric.SetName("foo") metric.SetUnit("bar") - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) 
metric.Gauge().DataPoints().AppendEmpty().SetIntVal(int64(i)) } return metrics } -func NewTestLogs(logCount int) pdata.Logs { - logs := pdata.NewLogs() +func NewTestLogs(logCount int) plog.Logs { + logs := plog.NewLogs() for i := 0; i < logCount; i++ { log := logs.ResourceLogs().AppendEmpty().ScopeLogs().AppendEmpty().LogRecords().AppendEmpty() diff --git a/exporter/awsprometheusremotewriteexporter/go.mod b/exporter/awsprometheusremotewriteexporter/go.mod index fd4084caffc6..ab0c22289ae6 100644 --- a/exporter/awsprometheusremotewriteexporter/go.mod +++ b/exporter/awsprometheusremotewriteexporter/go.mod @@ -6,11 +6,11 @@ require ( github.com/aws/aws-sdk-go v1.43.37 github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.48.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d ) require ( - github.com/cenkalti/backoff/v4 v4.1.2 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/felixge/httpsnoop v1.0.2 // indirect github.com/fsnotify/fsnotify v1.5.1 // indirect @@ -23,7 +23,7 @@ require ( github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/klauspost/compress v1.15.1 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect @@ -33,14 +33,14 @@ require ( github.com/prometheus/common v0.33.0 // indirect github.com/prometheus/prometheus v1.8.2-0.20220117154355-4855a0c067e2 // indirect github.com/rs/cors v1.8.2 // indirect - github.com/spf13/cast v1.4.1 // indirect github.com/tidwall/gjson v1.10.2 // indirect github.com/tidwall/match v1.1.1 // indirect github.com/tidwall/pretty v1.2.0 // indirect github.com/tidwall/tinylru v1.1.0 // indirect github.com/tidwall/wal v1.1.7 // indirect go.opencensus.io v0.23.0 // indirect - go.opentelemetry.io/collector/model v0.48.0 // indirect + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d // indirect + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect @@ -49,9 +49,8 @@ require ( go.uber.org/multierr v1.8.0 // indirect go.uber.org/zap v1.21.0 // indirect golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect + golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 // indirect golang.org/x/text v0.3.7 // indirect - google.golang.org/genproto v0.0.0-20211223182754-3ac035c7e7cb // indirect google.golang.org/grpc v1.45.0 // indirect google.golang.org/protobuf v1.28.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect @@ -65,3 +64,5 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/corei replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry => ../../pkg/resourcetotelemetry replace 
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite => ../../pkg/translator/prometheusremotewrite + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/exporter/awsprometheusremotewriteexporter/go.sum b/exporter/awsprometheusremotewriteexporter/go.sum index e9a649d77905..94a4f2b67f28 100644 --- a/exporter/awsprometheusremotewriteexporter/go.sum +++ b/exporter/awsprometheusremotewriteexporter/go.sum @@ -211,8 +211,8 @@ github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n github.com/cenkalti/backoff v0.0.0-20181003080854-62661b46c409/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= -github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM= @@ -717,7 +717,6 @@ github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= @@ -868,8 +867,8 @@ github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47e github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod 
h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -1193,8 +1192,6 @@ github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTd github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= @@ -1322,10 +1319,12 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0/go.mod h1:PFmBsWbldL1kiWZk9+0LBZz2brhByaGsvp6pRICMlPE= @@ -1336,7 +1335,7 @@ go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1 go.opentelemetry.io/otel/metric v0.28.0/go.mod h1:TrzsfQAmQaB1PDcdhBauLMk7nyyg9hm+GoQq/ekE9Iw= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.0/go.mod h1:qs7BrU5cZ8dXQHBGxHMOxwME/27YH2qEp4/+tZLLwJE= go.opentelemetry.io/otel/trace 
v1.6.1/go.mod h1:RkFRM1m0puWIq10oxImnGEduNBzxiN7TXluRBtE+5j0= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= @@ -1661,8 +1660,9 @@ golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= diff --git a/exporter/awsxrayexporter/awsxray.go b/exporter/awsxrayexporter/awsxray.go index 086fdb927468..77bd886c8127 100644 --- a/exporter/awsxrayexporter/awsxray.go +++ b/exporter/awsxrayexporter/awsxray.go @@ -23,7 +23,7 @@ import ( "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/consumer/consumererror" "go.opentelemetry.io/collector/exporter/exporterhelper" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter/internal/translator" @@ -49,7 +49,7 @@ func newTracesExporter( return exporterhelper.NewTracesExporter( config, set, - func(ctx context.Context, td pdata.Traces) error { + func(ctx context.Context, td ptrace.Traces) error { var err error logger.Debug("TracesExporter", typeLog, nameLog, zap.Int("#spans", td.SpanCount())) @@ -85,7 +85,7 @@ func newTracesExporter( ) } -func extractResourceSpans(config config.Exporter, logger *zap.Logger, td pdata.Traces) []*string { +func extractResourceSpans(config config.Exporter, logger *zap.Logger, td ptrace.Traces) []*string { documents := make([]*string, 0, td.SpanCount()) for i := 0; i < td.ResourceSpans().Len(); i++ { rspans := td.ResourceSpans().At(i) diff --git a/exporter/awsxrayexporter/awsxray_test.go b/exporter/awsxrayexporter/awsxray_test.go index a3425aae9c14..31142579fcf5 100644 --- a/exporter/awsxrayexporter/awsxray_test.go +++ b/exporter/awsxrayexporter/awsxray_test.go @@ -27,8 +27,9 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/config" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/awsutil" @@ -106,10 +107,10 @@ func generateConfig() config.Exporter { return exporterConfig } -func constructSpanData() pdata.Traces { +func constructSpanData() ptrace.Traces { resource := constructResource() - traces := pdata.NewTraces() + traces := 
ptrace.NewTraces() rspans := traces.ResourceSpans().AppendEmpty() resource.CopyTo(rspans.Resource()) ispans := rspans.ScopeSpans().AppendEmpty() @@ -118,9 +119,9 @@ func constructSpanData() pdata.Traces { } // nolint:unused -func constructW3CSpanData() pdata.Traces { +func constructW3CSpanData() ptrace.Traces { resource := constructResource() - traces := pdata.NewTraces() + traces := ptrace.NewTraces() rspans := traces.ResourceSpans().AppendEmpty() resource.CopyTo(rspans.Resource()) ispans := rspans.ScopeSpans().AppendEmpty() @@ -128,9 +129,9 @@ func constructW3CSpanData() pdata.Traces { return traces } -func constructXrayAndW3CSpanData() pdata.Traces { +func constructXrayAndW3CSpanData() ptrace.Traces { resource := constructResource() - traces := pdata.NewTraces() + traces := ptrace.NewTraces() rspans := traces.ResourceSpans().AppendEmpty() resource.CopyTo(rspans.Resource()) ispans := rspans.ScopeSpans().AppendEmpty() @@ -139,19 +140,19 @@ func constructXrayAndW3CSpanData() pdata.Traces { return traces } -func constructXrayTraceSpanData(ispans pdata.ScopeSpans) { +func constructXrayTraceSpanData(ispans ptrace.ScopeSpans) { constructHTTPClientSpan(newTraceID()).CopyTo(ispans.Spans().AppendEmpty()) constructHTTPServerSpan(newTraceID()).CopyTo(ispans.Spans().AppendEmpty()) } -func constructW3CFormatTraceSpanData(ispans pdata.ScopeSpans) { +func constructW3CFormatTraceSpanData(ispans ptrace.ScopeSpans) { constructHTTPClientSpan(constructW3CTraceID()).CopyTo(ispans.Spans().AppendEmpty()) constructHTTPServerSpan(constructW3CTraceID()).CopyTo(ispans.Spans().AppendEmpty()) } -func constructResource() pdata.Resource { - resource := pdata.NewResource() - attrs := pdata.NewMap() +func constructResource() pcommon.Resource { + resource := pcommon.NewResource() + attrs := pcommon.NewMap() attrs.InsertString(conventions.AttributeServiceName, "signup_aggregator") attrs.InsertString(conventions.AttributeContainerName, "signup_aggregator") attrs.InsertString(conventions.AttributeContainerImageName, "otel/signupaggregator") @@ -164,7 +165,7 @@ func constructResource() pdata.Resource { return resource } -func constructHTTPClientSpan(traceID pdata.TraceID) pdata.Span { +func constructHTTPClientSpan(traceID pcommon.TraceID) ptrace.Span { attributes := make(map[string]interface{}) attributes[conventions.AttributeHTTPMethod] = "GET" attributes[conventions.AttributeHTTPURL] = "https://api.example.com/users/junit" @@ -173,16 +174,16 @@ func constructHTTPClientSpan(traceID pdata.TraceID) pdata.Span { startTime := endTime.Add(-90 * time.Second) spanAttributes := constructSpanAttributes(attributes) - span := pdata.NewSpan() + span := ptrace.NewSpan() span.SetTraceID(traceID) span.SetSpanID(newSegmentID()) span.SetParentSpanID(newSegmentID()) span.SetName("/users/junit") - span.SetKind(pdata.SpanKindClient) - span.SetStartTimestamp(pdata.NewTimestampFromTime(startTime)) - span.SetEndTimestamp(pdata.NewTimestampFromTime(endTime)) + span.SetKind(ptrace.SpanKindClient) + span.SetStartTimestamp(pcommon.NewTimestampFromTime(startTime)) + span.SetEndTimestamp(pcommon.NewTimestampFromTime(endTime)) - status := pdata.NewSpanStatus() + status := ptrace.NewSpanStatus() status.SetCode(0) status.SetMessage("OK") status.CopyTo(span.Status()) @@ -191,7 +192,7 @@ func constructHTTPClientSpan(traceID pdata.TraceID) pdata.Span { return span } -func constructHTTPServerSpan(traceID pdata.TraceID) pdata.Span { +func constructHTTPServerSpan(traceID pcommon.TraceID) ptrace.Span { attributes := make(map[string]interface{}) 
attributes[conventions.AttributeHTTPMethod] = "GET" attributes[conventions.AttributeHTTPURL] = "https://api.example.com/users/junit" @@ -201,16 +202,16 @@ func constructHTTPServerSpan(traceID pdata.TraceID) pdata.Span { startTime := endTime.Add(-90 * time.Second) spanAttributes := constructSpanAttributes(attributes) - span := pdata.NewSpan() + span := ptrace.NewSpan() span.SetTraceID(traceID) span.SetSpanID(newSegmentID()) span.SetParentSpanID(newSegmentID()) span.SetName("/users/junit") - span.SetKind(pdata.SpanKindServer) - span.SetStartTimestamp(pdata.NewTimestampFromTime(startTime)) - span.SetEndTimestamp(pdata.NewTimestampFromTime(endTime)) + span.SetKind(ptrace.SpanKindServer) + span.SetStartTimestamp(pcommon.NewTimestampFromTime(startTime)) + span.SetEndTimestamp(pcommon.NewTimestampFromTime(endTime)) - status := pdata.NewSpanStatus() + status := ptrace.NewSpanStatus() status.SetCode(0) status.SetMessage("OK") status.CopyTo(span.Status()) @@ -219,8 +220,8 @@ func constructHTTPServerSpan(traceID pdata.TraceID) pdata.Span { return span } -func constructSpanAttributes(attributes map[string]interface{}) pdata.Map { - attrs := pdata.NewMap() +func constructSpanAttributes(attributes map[string]interface{}) pcommon.Map { + attrs := pcommon.NewMap() for key, value := range attributes { if cast, ok := value.(int); ok { attrs.InsertInt(key, int64(cast)) @@ -233,7 +234,7 @@ func constructSpanAttributes(attributes map[string]interface{}) pdata.Map { return attrs } -func newTraceID() pdata.TraceID { +func newTraceID() pcommon.TraceID { var r [16]byte epoch := time.Now().Unix() binary.BigEndian.PutUint32(r[0:4], uint32(epoch)) @@ -241,22 +242,22 @@ func newTraceID() pdata.TraceID { if err != nil { panic(err) } - return pdata.NewTraceID(r) + return pcommon.NewTraceID(r) } -func constructW3CTraceID() pdata.TraceID { +func constructW3CTraceID() pcommon.TraceID { var r [16]byte for i := range r { r[i] = byte(rand.Intn(128)) } - return pdata.NewTraceID(r) + return pcommon.NewTraceID(r) } -func newSegmentID() pdata.SpanID { +func newSegmentID() pcommon.SpanID { var r [8]byte _, err := rand.Read(r[:]) if err != nil { panic(err) } - return pdata.NewSpanID(r) + return pcommon.NewSpanID(r) } diff --git a/exporter/awsxrayexporter/go.mod b/exporter/awsxrayexporter/go.mod index 06a81f099479..def1c1403ef2 100644 --- a/exporter/awsxrayexporter/go.mod +++ b/exporter/awsxrayexporter/go.mod @@ -7,26 +7,25 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/awsutil v0.0.0-00010101000000-000000000000 github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray v0.0.0-00010101000000-000000000000 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/zap v1.21.0 ) require ( - github.com/cenkalti/backoff/v4 v4.1.2 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/fsnotify/fsnotify v1.5.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/protobuf v1.5.2 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // 
indirect github.com/kr/pretty v0.3.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/pelletier/go-toml v1.9.4 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spf13/cast v1.4.1 // indirect github.com/stretchr/objx v0.2.0 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect @@ -34,12 +33,8 @@ require ( go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect - golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect golang.org/x/text v0.3.7 // indirect - google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect - google.golang.org/grpc v1.45.0 // indirect - google.golang.org/protobuf v1.28.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) @@ -47,3 +42,5 @@ require ( replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray => ./../../internal/aws/xray replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/awsutil => ./../../internal/aws/awsutil + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/exporter/awsxrayexporter/go.sum b/exporter/awsxrayexporter/go.sum index c6a6d16902d8..566b3f26d6bb 100644 --- a/exporter/awsxrayexporter/go.sum +++ b/exporter/awsxrayexporter/go.sum @@ -1,8 +1,5 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/aws/aws-sdk-go v1.43.37 h1:kyZ7UjaPZaCik+asF33UFOOYSwr9liDRr/UM/vuw8yY= @@ -20,19 +17,11 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= -github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= 
-github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -40,9 +29,6 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= @@ -50,7 +36,6 @@ github.com/felixge/httpsnoop v1.0.2 h1:+nS9g82KMXccJ/wp0zyRW9ZBHFETmMGtkk+2CTTrW github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= @@ -67,18 +52,14 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/protobuf v1.2.0/go.mod 
h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -88,13 +69,10 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -126,8 +104,8 @@ github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqx github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= 
-github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= @@ -168,15 +146,11 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rhnvrm/simples3 v0.6.1/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -184,7 +158,6 @@ github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= @@ -194,20 +167,21 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod 
h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -231,20 +205,18 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net 
v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -260,7 +232,6 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -269,14 +240,12 @@ golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -299,22 +268,16 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -324,19 +287,13 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 
v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= diff --git a/exporter/awsxrayexporter/internal/translator/aws.go b/exporter/awsxrayexporter/internal/translator/aws.go index 22064affb5e9..c6da0a8376d1 100644 --- a/exporter/awsxrayexporter/internal/translator/aws.go +++ b/exporter/awsxrayexporter/internal/translator/aws.go @@ -19,13 +19,13 @@ import ( "strconv" "github.com/aws/aws-sdk-go/aws" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" awsxray "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray" ) -func makeAws(attributes map[string]pdata.Value, resource pdata.Resource) (map[string]pdata.Value, *awsxray.AWSData) { +func makeAws(attributes map[string]pcommon.Value, resource pcommon.Resource) (map[string]pcommon.Value, *awsxray.AWSData) { var ( cloud string service string @@ -56,8 +56,8 @@ func makeAws(attributes map[string]pdata.Value, resource pdata.Resource) (map[st taskArn string taskFamily string launchType string - logGroups pdata.Slice - logGroupArns pdata.Slice + logGroups pcommon.Slice + logGroupArns pcommon.Slice cwl []awsxray.LogGroupMetadata ec2 *awsxray.EC2Metadata ecs *awsxray.ECSMetadata @@ -65,8 +65,8 @@ func makeAws(attributes map[string]pdata.Value, resource pdata.Resource) (map[st eks *awsxray.EKSMetadata ) - filtered := make(map[string]pdata.Value) - resource.Attributes().Range(func(key string, value pdata.Value) bool { + filtered := make(map[string]pcommon.Value) + resource.Attributes().Range(func(key string, value pcommon.Value) bool { switch key { case conventions.AttributeCloudProvider: cloud = value.StringVal() @@ -137,7 +137,7 @@ func makeAws(attributes map[string]pdata.Value, resource pdata.Resource) (map[st case awsxray.AWSOperationAttribute: // Determinstically handled with if else above case awsxray.AWSAccountAttribute: - if value.Type() != pdata.ValueTypeEmpty { + if value.Type() != pcommon.ValueTypeEmpty { account = value.StringVal() } case awsxray.AWSRegionAttribute: @@ -212,9 +212,9 @@ func makeAws(attributes map[string]pdata.Value, resource pdata.Resource) (map[st // Since we must couple log group ARNs and Log Group Names in the same CWLogs object, we first try to derive the // names from the ARN, then fall back to just recording the names - if logGroupArns != (pdata.Slice{}) && logGroupArns.Len() > 0 { + if logGroupArns != (pcommon.Slice{}) && logGroupArns.Len() > 0 { cwl = getLogGroupMetadata(logGroupArns, true) - } else if logGroups != (pdata.Slice{}) && logGroups.Len() > 0 { + } else if logGroups != (pcommon.Slice{}) && logGroups.Len() > 0 { cwl = getLogGroupMetadata(logGroups, false) } @@ -251,7 +251,7 @@ func makeAws(attributes map[string]pdata.Value, resource pdata.Resource) (map[st // Given an array of log group ARNs, create a corresponding amount of LogGroupMetadata objects with log_group and arn // populated, or 
given an array of just log group names, create the LogGroupMetadata objects with arn omitted -func getLogGroupMetadata(logGroups pdata.Slice, isArn bool) []awsxray.LogGroupMetadata { +func getLogGroupMetadata(logGroups pcommon.Slice, isArn bool) []awsxray.LogGroupMetadata { var lgm []awsxray.LogGroupMetadata for i := 0; i < logGroups.Len(); i++ { if isArn { diff --git a/exporter/awsxrayexporter/internal/translator/aws_test.go b/exporter/awsxrayexporter/internal/translator/aws_test.go index 4e40f9b1579b..e4c3f0696813 100644 --- a/exporter/awsxrayexporter/internal/translator/aws_test.go +++ b/exporter/awsxrayexporter/internal/translator/aws_test.go @@ -19,8 +19,8 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" awsxray "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray" ) @@ -29,8 +29,8 @@ func TestAwsFromEc2Resource(t *testing.T) { instanceID := "i-00f7c0bcb26da2a99" hostType := "m5.xlarge" imageID := "ami-0123456789" - resource := pdata.NewResource() - attrs := pdata.NewMap() + resource := pcommon.NewResource() + attrs := pcommon.NewMap() attrs.InsertString(conventions.AttributeCloudProvider, conventions.AttributeCloudProviderAWS) attrs.InsertString(conventions.AttributeCloudPlatform, conventions.AttributeCloudPlatformAWSEC2) attrs.InsertString(conventions.AttributeCloudAccountID, "123456789") @@ -40,7 +40,7 @@ func TestAwsFromEc2Resource(t *testing.T) { attrs.InsertString(conventions.AttributeHostImageID, imageID) attrs.CopyTo(resource.Attributes()) - attributes := make(map[string]pdata.Value) + attributes := make(map[string]pcommon.Value) filtered, awsData := makeAws(attributes, resource) @@ -69,8 +69,8 @@ func TestAwsFromEcsResource(t *testing.T) { taskArn := "arn:aws:ecs:us-west-2:123456789123:task/123" clusterArn := "arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster" containerArn := "arn:aws:ecs:us-west-2:123456789123:container-instance/123" - resource := pdata.NewResource() - attrs := pdata.NewMap() + resource := pcommon.NewResource() + attrs := pcommon.NewMap() attrs.InsertString(conventions.AttributeCloudProvider, conventions.AttributeCloudProviderAWS) attrs.InsertString(conventions.AttributeCloudPlatform, conventions.AttributeCloudPlatformAWSECS) attrs.InsertString(conventions.AttributeCloudAccountID, "123456789") @@ -89,7 +89,7 @@ func TestAwsFromEcsResource(t *testing.T) { attrs.CopyTo(resource.Attributes()) - attributes := make(map[string]pdata.Value) + attributes := make(map[string]pcommon.Value) filtered, awsData := makeAws(attributes, resource) @@ -114,8 +114,8 @@ func TestAwsFromEcsResource(t *testing.T) { func TestAwsFromBeanstalkResource(t *testing.T) { deployID := "232" versionLabel := "4" - resource := pdata.NewResource() - attrs := pdata.NewMap() + resource := pcommon.NewResource() + attrs := pcommon.NewMap() attrs.InsertString(conventions.AttributeCloudProvider, conventions.AttributeCloudProviderAWS) attrs.InsertString(conventions.AttributeCloudPlatform, conventions.AttributeCloudPlatformAWSElasticBeanstalk) attrs.InsertString(conventions.AttributeCloudAccountID, "123456789") @@ -125,7 +125,7 @@ func TestAwsFromBeanstalkResource(t *testing.T) { attrs.InsertString(conventions.AttributeServiceVersion, versionLabel) attrs.CopyTo(resource.Attributes()) - attributes := make(map[string]pdata.Value) + attributes := 
make(map[string]pcommon.Value) filtered, awsData := makeAws(attributes, resource) @@ -146,8 +146,8 @@ func TestAwsFromEksResource(t *testing.T) { instanceID := "i-00f7c0bcb26da2a99" containerName := "signup_aggregator-x82ufje83" containerID := "0123456789A" - resource := pdata.NewResource() - attrs := pdata.NewMap() + resource := pcommon.NewResource() + attrs := pcommon.NewMap() attrs.InsertString(conventions.AttributeCloudProvider, conventions.AttributeCloudProviderAWS) attrs.InsertString(conventions.AttributeCloudPlatform, conventions.AttributeCloudPlatformAWSEKS) attrs.InsertString(conventions.AttributeCloudAccountID, "123456789") @@ -164,7 +164,7 @@ func TestAwsFromEksResource(t *testing.T) { attrs.InsertString(conventions.AttributeHostType, "m5.xlarge") attrs.CopyTo(resource.Attributes()) - attributes := make(map[string]pdata.Value) + attributes := make(map[string]pcommon.Value) filtered, awsData := makeAws(attributes, resource) @@ -185,8 +185,8 @@ func TestAwsWithAwsSqsResources(t *testing.T) { instanceID := "i-00f7c0bcb26da2a99" containerName := "signup_aggregator-x82ufje83" containerID := "0123456789A" - resource := pdata.NewResource() - attrs := pdata.NewMap() + resource := pcommon.NewResource() + attrs := pcommon.NewMap() attrs.InsertString(conventions.AttributeCloudProvider, conventions.AttributeCloudProviderAWS) attrs.InsertString(conventions.AttributeCloudAccountID, "123456789") attrs.InsertString(conventions.AttributeCloudAvailabilityZone, "us-east-1c") @@ -203,12 +203,12 @@ func TestAwsWithAwsSqsResources(t *testing.T) { attrs.InsertString(conventions.AttributeHostType, "m5.xlarge") queueURL := "https://sqs.use1.amazonaws.com/Meltdown-Alerts" - attributes := make(map[string]pdata.Value) - attributes[awsxray.AWSOperationAttribute] = pdata.NewValueString("SendMessage") - attributes[awsxray.AWSAccountAttribute] = pdata.NewValueString("987654321") - attributes[awsxray.AWSRegionAttribute] = pdata.NewValueString("us-east-2") - attributes[awsxray.AWSQueueURLAttribute] = pdata.NewValueString(queueURL) - attributes["employee.id"] = pdata.NewValueString("XB477") + attributes := make(map[string]pcommon.Value) + attributes[awsxray.AWSOperationAttribute] = pcommon.NewValueString("SendMessage") + attributes[awsxray.AWSAccountAttribute] = pcommon.NewValueString("987654321") + attributes[awsxray.AWSRegionAttribute] = pcommon.NewValueString("us-east-2") + attributes[awsxray.AWSQueueURLAttribute] = pcommon.NewValueString(queueURL) + attributes["employee.id"] = pcommon.NewValueString("XB477") filtered, awsData := makeAws(attributes, resource) @@ -219,9 +219,9 @@ func TestAwsWithAwsSqsResources(t *testing.T) { } func TestAwsWithRpcAttributes(t *testing.T) { - resource := pdata.NewResource() - attributes := make(map[string]pdata.Value) - attributes[conventions.AttributeRPCMethod] = pdata.NewValueString("ListBuckets") + resource := pcommon.NewResource() + attributes := make(map[string]pcommon.Value) + attributes[conventions.AttributeRPCMethod] = pcommon.NewValueString("ListBuckets") _, awsData := makeAws(attributes, resource) @@ -231,10 +231,10 @@ func TestAwsWithRpcAttributes(t *testing.T) { func TestAwsWithSqsAlternateAttribute(t *testing.T) { queueURL := "https://sqs.use1.amazonaws.com/Meltdown-Alerts" - attributes := make(map[string]pdata.Value) - attributes[awsxray.AWSQueueURLAttribute2] = pdata.NewValueString(queueURL) + attributes := make(map[string]pcommon.Value) + attributes[awsxray.AWSQueueURLAttribute2] = pcommon.NewValueString(queueURL) - filtered, awsData := makeAws(attributes, 
pdata.NewResource()) + filtered, awsData := makeAws(attributes, pcommon.NewResource()) assert.NotNil(t, filtered) assert.NotNil(t, awsData) @@ -245,8 +245,8 @@ func TestAwsWithAwsDynamoDbResources(t *testing.T) { instanceID := "i-00f7c0bcb26da2a99" containerName := "signup_aggregator-x82ufje83" containerID := "0123456789A" - resource := pdata.NewResource() - attrs := pdata.NewMap() + resource := pcommon.NewResource() + attrs := pcommon.NewMap() attrs.InsertString(conventions.AttributeCloudProvider, conventions.AttributeCloudProviderAWS) attrs.InsertString(conventions.AttributeCloudAccountID, "123456789") attrs.InsertString(conventions.AttributeCloudAvailabilityZone, "us-east-1c") @@ -263,11 +263,11 @@ func TestAwsWithAwsDynamoDbResources(t *testing.T) { attrs.InsertString(conventions.AttributeHostType, "m5.xlarge") tableName := "WIDGET_TYPES" - attributes := make(map[string]pdata.Value) - attributes[conventions.AttributeRPCMethod] = pdata.NewValueString("IncorrectAWSSDKOperation") - attributes[awsxray.AWSOperationAttribute] = pdata.NewValueString("PutItem") - attributes[awsxray.AWSRequestIDAttribute] = pdata.NewValueString("75107C82-EC8A-4F75-883F-4440B491B0AB") - attributes[awsxray.AWSTableNameAttribute] = pdata.NewValueString(tableName) + attributes := make(map[string]pcommon.Value) + attributes[conventions.AttributeRPCMethod] = pcommon.NewValueString("IncorrectAWSSDKOperation") + attributes[awsxray.AWSOperationAttribute] = pcommon.NewValueString("PutItem") + attributes[awsxray.AWSRequestIDAttribute] = pcommon.NewValueString("75107C82-EC8A-4F75-883F-4440B491B0AB") + attributes[awsxray.AWSTableNameAttribute] = pcommon.NewValueString(tableName) filtered, awsData := makeAws(attributes, resource) @@ -280,10 +280,10 @@ func TestAwsWithAwsDynamoDbResources(t *testing.T) { func TestAwsWithDynamoDbAlternateAttribute(t *testing.T) { tableName := "MyTable" - attributes := make(map[string]pdata.Value) - attributes[awsxray.AWSTableNameAttribute2] = pdata.NewValueString(tableName) + attributes := make(map[string]pcommon.Value) + attributes[awsxray.AWSTableNameAttribute2] = pcommon.NewValueString(tableName) - filtered, awsData := makeAws(attributes, pdata.NewResource()) + filtered, awsData := makeAws(attributes, pcommon.NewResource()) assert.NotNil(t, filtered) assert.NotNil(t, awsData) @@ -292,10 +292,10 @@ func TestAwsWithDynamoDbAlternateAttribute(t *testing.T) { func TestAwsWithRequestIdAlternateAttribute(t *testing.T) { requestid := "12345-request" - attributes := make(map[string]pdata.Value) - attributes[awsxray.AWSRequestIDAttribute2] = pdata.NewValueString(requestid) + attributes := make(map[string]pcommon.Value) + attributes[awsxray.AWSRequestIDAttribute2] = pcommon.NewValueString(requestid) - filtered, awsData := makeAws(attributes, pdata.NewResource()) + filtered, awsData := makeAws(attributes, pcommon.NewResource()) assert.NotNil(t, filtered) assert.NotNil(t, awsData) @@ -303,8 +303,8 @@ func TestAwsWithRequestIdAlternateAttribute(t *testing.T) { } func TestJavaSDK(t *testing.T) { - attributes := make(map[string]pdata.Value) - resource := pdata.NewResource() + attributes := make(map[string]pcommon.Value) + resource := pcommon.NewResource() resource.Attributes().InsertString(conventions.AttributeTelemetrySDKName, "opentelemetry") resource.Attributes().InsertString(conventions.AttributeTelemetrySDKLanguage, "java") resource.Attributes().InsertString(conventions.AttributeTelemetrySDKVersion, "1.2.3") @@ -318,8 +318,8 @@ func TestJavaSDK(t *testing.T) { } func TestJavaAutoInstrumentation(t 
*testing.T) { - attributes := make(map[string]pdata.Value) - resource := pdata.NewResource() + attributes := make(map[string]pcommon.Value) + resource := pcommon.NewResource() resource.Attributes().InsertString(conventions.AttributeTelemetrySDKName, "opentelemetry") resource.Attributes().InsertString(conventions.AttributeTelemetrySDKLanguage, "java") resource.Attributes().InsertString(conventions.AttributeTelemetrySDKVersion, "1.2.3") @@ -335,8 +335,8 @@ func TestJavaAutoInstrumentation(t *testing.T) { } func TestGoSDK(t *testing.T) { - attributes := make(map[string]pdata.Value) - resource := pdata.NewResource() + attributes := make(map[string]pcommon.Value) + resource := pcommon.NewResource() resource.Attributes().InsertString(conventions.AttributeTelemetrySDKName, "opentelemetry") resource.Attributes().InsertString(conventions.AttributeTelemetrySDKLanguage, "go") resource.Attributes().InsertString(conventions.AttributeTelemetrySDKVersion, "2.0.3") @@ -350,8 +350,8 @@ func TestGoSDK(t *testing.T) { } func TestCustomSDK(t *testing.T) { - attributes := make(map[string]pdata.Value) - resource := pdata.NewResource() + attributes := make(map[string]pcommon.Value) + resource := pcommon.NewResource() resource.Attributes().InsertString(conventions.AttributeTelemetrySDKName, "opentracing") resource.Attributes().InsertString(conventions.AttributeTelemetrySDKLanguage, "java") resource.Attributes().InsertString(conventions.AttributeTelemetrySDKVersion, "2.0.3") @@ -372,9 +372,9 @@ func TestLogGroups(t *testing.T) { LogGroup: awsxray.String("group2"), } - attributes := make(map[string]pdata.Value) - resource := pdata.NewResource() - lg := pdata.NewValueSlice() + attributes := make(map[string]pcommon.Value) + resource := pcommon.NewResource() + lg := pcommon.NewValueSlice() ava := lg.SliceVal() ava.EnsureCapacity(2) ava.AppendEmpty().SetStringVal("group1") @@ -403,9 +403,9 @@ func TestLogGroupsFromArns(t *testing.T) { Arn: awsxray.String(group2), } - attributes := make(map[string]pdata.Value) - resource := pdata.NewResource() - lga := pdata.NewValueSlice() + attributes := make(map[string]pcommon.Value) + resource := pcommon.NewResource() + lga := pcommon.NewValueSlice() ava := lga.SliceVal() ava.EnsureCapacity(2) ava.AppendEmpty().SetStringVal(group1) diff --git a/exporter/awsxrayexporter/internal/translator/cause.go b/exporter/awsxrayexporter/internal/translator/cause.go index 8c84f559798c..20508b9965c8 100644 --- a/exporter/awsxrayexporter/internal/translator/cause.go +++ b/exporter/awsxrayexporter/internal/translator/cause.go @@ -22,8 +22,9 @@ import ( "strings" "github.com/aws/aws-sdk-go/aws" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" awsxray "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray" ) @@ -32,10 +33,10 @@ import ( // TODO: Remove this when collector defines this semantic convention. 
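(Editorial aside, not part of the patch.) The cause translator below now takes ptrace.Span together with pcommon.Value and pcommon.Resource instead of the old pdata types. A minimal, self-contained sketch of that migrated surface, using only calls that appear elsewhere in this patch and assuming the pinned go.opentelemetry.io/collector/pdata snapshot from go.mod resolves:

package main

import (
	"fmt"

	conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1"
	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/ptrace"
)

func main() {
	// Build an error span carrying an "exception" event, the shape makeCause
	// inspects; this mirrors what cause_test.go constructs later in the patch.
	span := ptrace.NewSpan()
	span.SetName("/widgets")
	span.SetKind(ptrace.SpanKindServer)
	span.Status().SetCode(ptrace.StatusCodeError)
	span.Status().SetMessage("this is a test")

	event := span.Events().AppendEmpty()
	event.SetName("exception") // ExceptionEventName below

	attrs := pcommon.NewMap()
	attrs.InsertString(conventions.AttributeExceptionType, "java.lang.IllegalStateException")
	attrs.InsertString(conventions.AttributeExceptionMessage, "state is not legal")
	attrs.CopyTo(event.Attributes())

	fmt.Println(span.Events().Len(), span.Status().Code() == ptrace.StatusCodeError)
}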
const ExceptionEventName = "exception" -func makeCause(span pdata.Span, attributes map[string]pdata.Value, resource pdata.Resource) (isError, isFault, isThrottle bool, - filtered map[string]pdata.Value, cause *awsxray.CauseData) { +func makeCause(span ptrace.Span, attributes map[string]pcommon.Value, resource pcommon.Resource) (isError, isFault, isThrottle bool, + filtered map[string]pcommon.Value, cause *awsxray.CauseData) { status := span.Status() - if status.Code() != pdata.StatusCodeError { + if status.Code() != ptrace.StatusCodeError { return false, false, false, attributes, nil } filtered = attributes @@ -91,7 +92,7 @@ func makeCause(span pdata.Span, attributes map[string]pdata.Value, resource pdat } else { // Use OpenCensus behavior if we didn't find any exception events to ease migration. message = status.Message() - filtered = make(map[string]pdata.Value) + filtered = make(map[string]pcommon.Value) for key, value := range attributes { switch key { case "http.status_text": diff --git a/exporter/awsxrayexporter/internal/translator/cause_test.go b/exporter/awsxrayexporter/internal/translator/cause_test.go index 7f36ebf1d46f..0b89859a4dd7 100644 --- a/exporter/awsxrayexporter/internal/translator/cause_test.go +++ b/exporter/awsxrayexporter/internal/translator/cause_test.go @@ -20,20 +20,21 @@ import ( "time" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" ) func TestCauseWithExceptions(t *testing.T) { errorMsg := "this is a test" attributeMap := make(map[string]interface{}) - span := constructExceptionServerSpan(attributeMap, pdata.StatusCodeError) + span := constructExceptionServerSpan(attributeMap, ptrace.StatusCodeError) span.Status().SetMessage(errorMsg) event1 := span.Events().AppendEmpty() event1.SetName(ExceptionEventName) - attributes := pdata.NewMap() + attributes := pcommon.NewMap() attributes.InsertString(conventions.AttributeExceptionType, "java.lang.IllegalStateException") attributes.InsertString(conventions.AttributeExceptionMessage, "bad state") attributes.InsertString(conventions.AttributeExceptionStacktrace, `java.lang.IllegalStateException: state is not legal @@ -45,13 +46,13 @@ Caused by: java.lang.IllegalArgumentException: bad argument`) event2 := span.Events().AppendEmpty() event2.SetName(ExceptionEventName) - attributes = pdata.NewMap() + attributes = pcommon.NewMap() attributes.InsertString(conventions.AttributeExceptionType, "EmptyError") attributes.CopyTo(event2.Attributes()) filtered, _ := makeHTTP(span) - res := pdata.NewResource() + res := pcommon.NewResource() res.Attributes().InsertString(conventions.AttributeTelemetrySDKLanguage, "java") isError, isFault, isThrottle, filteredResult, cause := makeCause(span, filtered, res) @@ -78,11 +79,11 @@ func TestCauseWithStatusMessage(t *testing.T) { attributes[conventions.AttributeHTTPMethod] = "POST" attributes[conventions.AttributeHTTPURL] = "https://api.example.com/widgets" attributes[conventions.AttributeHTTPStatusCode] = 500 - span := constructExceptionServerSpan(attributes, pdata.StatusCodeError) + span := constructExceptionServerSpan(attributes, ptrace.StatusCodeError) span.Status().SetMessage(errorMsg) filtered, _ := makeHTTP(span) - res := pdata.NewResource() + res := pcommon.NewResource() isError, isFault, isThrottle, filtered, cause := makeCause(span, filtered, res) assert.True(t, isFault) @@ -106,10 
+107,10 @@ func TestCauseWithHttpStatusMessage(t *testing.T) { attributes[conventions.AttributeHTTPURL] = "https://api.example.com/widgets" attributes[conventions.AttributeHTTPStatusCode] = 500 attributes["http.status_text"] = errorMsg - span := constructExceptionServerSpan(attributes, pdata.StatusCodeError) + span := constructExceptionServerSpan(attributes, ptrace.StatusCodeError) filtered, _ := makeHTTP(span) - res := pdata.NewResource() + res := pcommon.NewResource() isError, isFault, isThrottle, filtered, cause := makeCause(span, filtered, res) assert.True(t, isFault) @@ -134,13 +135,13 @@ func TestCauseWithZeroStatusMessage(t *testing.T) { attributes[conventions.AttributeHTTPStatusCode] = 500 attributes["http.status_text"] = errorMsg - span := constructExceptionServerSpan(attributes, pdata.StatusCodeUnset) + span := constructExceptionServerSpan(attributes, ptrace.StatusCodeUnset) filtered, _ := makeHTTP(span) // Status is used to determine whether an error or not. // This span illustrates incorrect instrumentation, // marking a success status with an error http status code, and status wins. // We do not expect to see such spans in practice. - res := pdata.NewResource() + res := pcommon.NewResource() isError, isFault, isThrottle, filtered, cause := makeCause(span, filtered, res) assert.False(t, isError) @@ -158,10 +159,10 @@ func TestCauseWithClientErrorMessage(t *testing.T) { attributes[conventions.AttributeHTTPStatusCode] = 499 attributes["http.status_text"] = errorMsg - span := constructExceptionServerSpan(attributes, pdata.StatusCodeError) + span := constructExceptionServerSpan(attributes, ptrace.StatusCodeError) filtered, _ := makeHTTP(span) - res := pdata.NewResource() + res := pcommon.NewResource() isError, isFault, isThrottle, filtered, cause := makeCause(span, filtered, res) assert.True(t, isError) @@ -179,10 +180,10 @@ func TestCauseWithThrottled(t *testing.T) { attributes[conventions.AttributeHTTPStatusCode] = 429 attributes["http.status_text"] = errorMsg - span := constructExceptionServerSpan(attributes, pdata.StatusCodeError) + span := constructExceptionServerSpan(attributes, ptrace.StatusCodeError) filtered, _ := makeHTTP(span) - res := pdata.NewResource() + res := pcommon.NewResource() isError, isFault, isThrottle, filtered, cause := makeCause(span, filtered, res) assert.True(t, isError) @@ -192,21 +193,21 @@ func TestCauseWithThrottled(t *testing.T) { assert.NotNil(t, cause) } -func constructExceptionServerSpan(attributes map[string]interface{}, statuscode pdata.StatusCode) pdata.Span { +func constructExceptionServerSpan(attributes map[string]interface{}, statuscode ptrace.StatusCode) ptrace.Span { endTime := time.Now().Round(time.Second) startTime := endTime.Add(-90 * time.Second) spanAttributes := constructSpanAttributes(attributes) - span := pdata.NewSpan() + span := ptrace.NewSpan() span.SetTraceID(newTraceID()) span.SetSpanID(newSegmentID()) span.SetParentSpanID(newSegmentID()) span.SetName("/widgets") - span.SetKind(pdata.SpanKindServer) - span.SetStartTimestamp(pdata.NewTimestampFromTime(startTime)) - span.SetEndTimestamp(pdata.NewTimestampFromTime(endTime)) + span.SetKind(ptrace.SpanKindServer) + span.SetStartTimestamp(pcommon.NewTimestampFromTime(startTime)) + span.SetEndTimestamp(pcommon.NewTimestampFromTime(endTime)) - status := pdata.NewSpanStatus() + status := ptrace.NewSpanStatus() status.SetCode(statuscode) status.CopyTo(span.Status()) diff --git a/exporter/awsxrayexporter/internal/translator/http.go b/exporter/awsxrayexporter/internal/translator/http.go 
index cb2ba6356daf..37dfdcdcd85e 100644 --- a/exporter/awsxrayexporter/internal/translator/http.go +++ b/exporter/awsxrayexporter/internal/translator/http.go @@ -18,19 +18,20 @@ import ( "strconv" "github.com/aws/aws-sdk-go/aws" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" awsxray "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray" ) -func makeHTTP(span pdata.Span) (map[string]pdata.Value, *awsxray.HTTPData) { +func makeHTTP(span ptrace.Span) (map[string]pcommon.Value, *awsxray.HTTPData) { var ( info = awsxray.HTTPData{ Request: &awsxray.RequestData{}, Response: &awsxray.ResponseData{}, } - filtered = make(map[string]pdata.Value) + filtered = make(map[string]pcommon.Value) urlParts = make(map[string]string) ) @@ -41,7 +42,7 @@ func makeHTTP(span pdata.Span) (map[string]pdata.Value, *awsxray.HTTPData) { hasHTTP := false hasHTTPRequestURLAttributes := false - span.Attributes().Range(func(key string, value pdata.Value) bool { + span.Attributes().Range(func(key string, value pcommon.Value) bool { switch key { case conventions.AttributeHTTPMethod: info.Request.Method = awsxray.String(value.StringVal()) @@ -112,7 +113,7 @@ func makeHTTP(span pdata.Span) (map[string]pdata.Value, *awsxray.HTTPData) { } if hasHTTPRequestURLAttributes { - if span.Kind() == pdata.SpanKindServer { + if span.Kind() == ptrace.SpanKindServer { info.Request.URL = awsxray.String(constructServerURL(urlParts)) } else { info.Request.URL = awsxray.String(constructClientURL(urlParts)) @@ -124,7 +125,7 @@ func makeHTTP(span pdata.Span) (map[string]pdata.Value, *awsxray.HTTPData) { return filtered, &info } -func extractResponseSizeFromEvents(span pdata.Span) int64 { +func extractResponseSizeFromEvents(span ptrace.Span) int64 { // Support insrumentation that sets response size in span or as an event. 
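(Editorial aside, not part of the patch.) The two response-size helpers in this hunk now operate on ptrace.Span and pcommon.Map. A standalone sketch of the attribute lookup, paraphrasing extractResponseSizeFromAttributes with illustrative values and assuming the pinned collector/pdata snapshot from go.mod resolves:

package main

import (
	"fmt"

	conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1"
	"go.opentelemetry.io/collector/pdata/pcommon"
)

// responseSizeFromAttributes paraphrases the helper in this hunk: only
// RECEIVED messages contribute a payload size.
func responseSizeFromAttributes(attributes pcommon.Map) int64 {
	typeVal, ok := attributes.Get("message.type")
	if ok && typeVal.StringVal() == "RECEIVED" {
		if sizeVal, ok := attributes.Get(conventions.AttributeMessagingMessagePayloadSizeBytes); ok {
			return sizeVal.IntVal()
		}
	}
	return 0
}

func main() {
	attrs := pcommon.NewMap()
	attrs.InsertString("message.type", "RECEIVED")
	attrs.InsertInt(conventions.AttributeMessagingMessagePayloadSizeBytes, 12452) // size value borrowed from the tests
	fmt.Println(responseSizeFromAttributes(attrs)) // prints 12452
}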
size := extractResponseSizeFromAttributes(span.Attributes()) if size != 0 { @@ -140,7 +141,7 @@ func extractResponseSizeFromEvents(span pdata.Span) int64 { return size } -func extractResponseSizeFromAttributes(attributes pdata.Map) int64 { +func extractResponseSizeFromAttributes(attributes pcommon.Map) int64 { typeVal, ok := attributes.Get("message.type") if ok && typeVal.StringVal() == "RECEIVED" { if sizeVal, ok := attributes.Get(conventions.AttributeMessagingMessagePayloadSizeBytes); ok { diff --git a/exporter/awsxrayexporter/internal/translator/http_test.go b/exporter/awsxrayexporter/internal/translator/http_test.go index 962ac626211d..281f3e0d3fd0 100644 --- a/exporter/awsxrayexporter/internal/translator/http_test.go +++ b/exporter/awsxrayexporter/internal/translator/http_test.go @@ -20,8 +20,9 @@ import ( "time" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" ) func TestClientSpanWithURLAttribute(t *testing.T) { @@ -271,21 +272,21 @@ func TestSpanWithNotEnoughHTTPRequestURLAttributes(t *testing.T) { assert.NotNil(t, filtered) } -func constructHTTPClientSpan(attributes map[string]interface{}) pdata.Span { +func constructHTTPClientSpan(attributes map[string]interface{}) ptrace.Span { endTime := time.Now().Round(time.Second) startTime := endTime.Add(-90 * time.Second) spanAttributes := constructSpanAttributes(attributes) - span := pdata.NewSpan() + span := ptrace.NewSpan() span.SetTraceID(newTraceID()) span.SetSpanID(newSegmentID()) span.SetParentSpanID(newSegmentID()) span.SetName("/users/junit") - span.SetKind(pdata.SpanKindClient) - span.SetStartTimestamp(pdata.NewTimestampFromTime(startTime)) - span.SetEndTimestamp(pdata.NewTimestampFromTime(endTime)) + span.SetKind(ptrace.SpanKindClient) + span.SetStartTimestamp(pcommon.NewTimestampFromTime(startTime)) + span.SetEndTimestamp(pcommon.NewTimestampFromTime(endTime)) - status := pdata.NewSpanStatus() + status := ptrace.NewSpanStatus() status.SetCode(0) status.SetMessage("OK") status.CopyTo(span.Status()) @@ -294,21 +295,21 @@ func constructHTTPClientSpan(attributes map[string]interface{}) pdata.Span { return span } -func constructHTTPServerSpan(attributes map[string]interface{}) pdata.Span { +func constructHTTPServerSpan(attributes map[string]interface{}) ptrace.Span { endTime := time.Now().Round(time.Second) startTime := endTime.Add(-90 * time.Second) spanAttributes := constructSpanAttributes(attributes) - span := pdata.NewSpan() + span := ptrace.NewSpan() span.SetTraceID(newTraceID()) span.SetSpanID(newSegmentID()) span.SetParentSpanID(newSegmentID()) span.SetName("/users/junit") - span.SetKind(pdata.SpanKindServer) - span.SetStartTimestamp(pdata.NewTimestampFromTime(startTime)) - span.SetEndTimestamp(pdata.NewTimestampFromTime(endTime)) + span.SetKind(ptrace.SpanKindServer) + span.SetStartTimestamp(pcommon.NewTimestampFromTime(startTime)) + span.SetEndTimestamp(pcommon.NewTimestampFromTime(endTime)) - status := pdata.NewSpanStatus() + status := ptrace.NewSpanStatus() status.SetCode(0) status.SetMessage("OK") status.CopyTo(span.Status()) diff --git a/exporter/awsxrayexporter/internal/translator/segment.go b/exporter/awsxrayexporter/internal/translator/segment.go index 82528984ff3b..5eeeba8998ac 100644 --- a/exporter/awsxrayexporter/internal/translator/segment.go +++ b/exporter/awsxrayexporter/internal/translator/segment.go @@ -25,8 
+25,9 @@ import ( "time" awsP "github.com/aws/aws-sdk-go/aws" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.8.0" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" awsxray "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray" ) @@ -65,7 +66,7 @@ var ( ) // MakeSegmentDocumentString converts an OpenTelemetry Span to an X-Ray Segment and then serialzies to JSON -func MakeSegmentDocumentString(span pdata.Span, resource pdata.Resource, indexedAttrs []string, indexAllAttrs bool) (string, error) { +func MakeSegmentDocumentString(span ptrace.Span, resource pcommon.Resource, indexedAttrs []string, indexAllAttrs bool) (string, error) { segment, err := MakeSegment(span, resource, indexedAttrs, indexAllAttrs) if err != nil { return "", err @@ -80,11 +81,11 @@ func MakeSegmentDocumentString(span pdata.Span, resource pdata.Resource, indexed } // MakeSegment converts an OpenTelemetry Span to an X-Ray Segment -func MakeSegment(span pdata.Span, resource pdata.Resource, indexedAttrs []string, indexAllAttrs bool) (*awsxray.Segment, error) { +func MakeSegment(span ptrace.Span, resource pcommon.Resource, indexedAttrs []string, indexAllAttrs bool) (*awsxray.Segment, error) { var segmentType string storeResource := true - if span.Kind() != pdata.SpanKindServer && + if span.Kind() != ptrace.SpanKindServer && !span.ParentSpanID().IsEmpty() { segmentType = "subsegment" // We only store the resource information for segments, the local root. @@ -154,7 +155,7 @@ func MakeSegment(span pdata.Span, resource pdata.Resource, indexedAttrs []string } } - if name == "" && span.Kind() == pdata.SpanKindServer { + if name == "" && span.Kind() == ptrace.SpanKindServer { // Only for a server span, we can use the resource. if service, ok := resource.Attributes().Get(conventions.AttributeServiceName); ok { name = service.StringVal() @@ -183,7 +184,7 @@ func MakeSegment(span pdata.Span, resource pdata.Resource, indexedAttrs []string name = fixSegmentName(span.Name()) } - if namespace == "" && span.Kind() == pdata.SpanKindClient { + if namespace == "" && span.Kind() == ptrace.SpanKindClient { namespace = "remote" } @@ -212,16 +213,16 @@ func MakeSegment(span pdata.Span, resource pdata.Resource, indexedAttrs []string } // newSegmentID generates a new valid X-Ray SegmentID -func newSegmentID() pdata.SpanID { +func newSegmentID() pcommon.SpanID { var r [8]byte _, err := rand.Read(r[:]) if err != nil { panic(err) } - return pdata.NewSpanID(r) + return pcommon.NewSpanID(r) } -func determineAwsOrigin(resource pdata.Resource) string { +func determineAwsOrigin(resource pcommon.Resource) string { if resource.Attributes().Len() == 0 { return "" } @@ -276,7 +277,7 @@ func determineAwsOrigin(resource pdata.Resource) string { // * For example, 10:00AM December 2nd, 2016 PST in epoch time is 1480615200 seconds, // or 58406520 in hexadecimal. // * A 96-bit identifier for the trace, globally unique, in 24 hexadecimal digits. -func convertToAmazonTraceID(traceID pdata.TraceID) (string, error) { +func convertToAmazonTraceID(traceID pcommon.TraceID) (string, error) { const ( // maxAge of 28 days. 
AWS has a 30 day limit, let's be conservative rather than // hit the limit @@ -314,11 +315,11 @@ func convertToAmazonTraceID(traceID pdata.TraceID) (string, error) { return string(content[0:traceIDLength]), nil } -func timestampToFloatSeconds(ts pdata.Timestamp) float64 { +func timestampToFloatSeconds(ts pcommon.Timestamp) float64 { return float64(ts) / float64(time.Second) } -func makeXRayAttributes(attributes map[string]pdata.Value, resource pdata.Resource, storeResource bool, indexedAttrs []string, indexAllAttrs bool) ( +func makeXRayAttributes(attributes map[string]pcommon.Value, resource pcommon.Resource, storeResource bool, indexedAttrs []string, indexAllAttrs bool) ( string, map[string]interface{}, map[string]map[string]interface{}) { var ( annotations = map[string]interface{}{} @@ -345,7 +346,7 @@ func makeXRayAttributes(attributes map[string]pdata.Value, resource pdata.Resour } if storeResource { - resource.Attributes().Range(func(key string, value pdata.Value) bool { + resource.Attributes().Range(func(key string, value pcommon.Value) bool { key = "otel.resource." + key annoVal := annotationValue(value) indexed := indexAllAttrs || indexedKeys[key] @@ -394,38 +395,38 @@ func makeXRayAttributes(attributes map[string]pdata.Value, resource pdata.Resour return user, annotations, metadata } -func annotationValue(value pdata.Value) interface{} { +func annotationValue(value pcommon.Value) interface{} { switch value.Type() { - case pdata.ValueTypeString: + case pcommon.ValueTypeString: return value.StringVal() - case pdata.ValueTypeInt: + case pcommon.ValueTypeInt: return value.IntVal() - case pdata.ValueTypeDouble: + case pcommon.ValueTypeDouble: return value.DoubleVal() - case pdata.ValueTypeBool: + case pcommon.ValueTypeBool: return value.BoolVal() } return nil } -func metadataValue(value pdata.Value) interface{} { +func metadataValue(value pcommon.Value) interface{} { switch value.Type() { - case pdata.ValueTypeString: + case pcommon.ValueTypeString: return value.StringVal() - case pdata.ValueTypeInt: + case pcommon.ValueTypeInt: return value.IntVal() - case pdata.ValueTypeDouble: + case pcommon.ValueTypeDouble: return value.DoubleVal() - case pdata.ValueTypeBool: + case pcommon.ValueTypeBool: return value.BoolVal() - case pdata.ValueTypeMap: + case pcommon.ValueTypeMap: converted := map[string]interface{}{} - value.MapVal().Range(func(key string, value pdata.Value) bool { + value.MapVal().Range(func(key string, value pcommon.Value) bool { converted[key] = metadataValue(value) return true }) return converted - case pdata.ValueTypeSlice: + case pcommon.ValueTypeSlice: arrVal := value.SliceVal() converted := make([]interface{}, arrVal.Len()) for i := 0; i < arrVal.Len(); i++ { diff --git a/exporter/awsxrayexporter/internal/translator/segment_test.go b/exporter/awsxrayexporter/internal/translator/segment_test.go index 62f9dfcda866..63da87441127 100644 --- a/exporter/awsxrayexporter/internal/translator/segment_test.go +++ b/exporter/awsxrayexporter/internal/translator/segment_test.go @@ -23,8 +23,9 @@ import ( "time" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.8.0" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" awsxray "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray" ) @@ -143,7 +144,7 @@ func TestServerSpanWithInternalServerError(t *testing.T) { attributes[conventions.AttributeHTTPUserAgent] = 
userAgent attributes[conventions.AttributeEnduserID] = enduser resource := constructDefaultResource() - span := constructServerSpan(parentSpanID, spanName, pdata.StatusCodeError, errorMessage, attributes) + span := constructServerSpan(parentSpanID, spanName, ptrace.StatusCodeError, errorMessage, attributes) timeEvents := constructTimedEventsWithSentMessageEvent(span.StartTimestamp()) timeEvents.CopyTo(span.Events()) @@ -170,7 +171,7 @@ func TestServerSpanWithThrottle(t *testing.T) { attributes[conventions.AttributeHTTPUserAgent] = userAgent attributes[conventions.AttributeEnduserID] = enduser resource := constructDefaultResource() - span := constructServerSpan(parentSpanID, spanName, pdata.StatusCodeError, errorMessage, attributes) + span := constructServerSpan(parentSpanID, spanName, ptrace.StatusCodeError, errorMessage, attributes) timeEvents := constructTimedEventsWithSentMessageEvent(span.StartTimestamp()) timeEvents.CopyTo(span.Events()) @@ -186,9 +187,9 @@ func TestServerSpanWithThrottle(t *testing.T) { func TestServerSpanNoParentId(t *testing.T) { spanName := "/api/locations" - parentSpanID := pdata.InvalidSpanID() + parentSpanID := pcommon.InvalidSpanID() resource := constructDefaultResource() - span := constructServerSpan(parentSpanID, spanName, pdata.StatusCodeOk, "OK", nil) + span := constructServerSpan(parentSpanID, spanName, ptrace.StatusCodeOk, "OK", nil) segment, _ := MakeSegment(span, resource, nil, false) @@ -196,15 +197,15 @@ func TestServerSpanNoParentId(t *testing.T) { } func TestSpanNoParentId(t *testing.T) { - span := pdata.NewSpan() + span := ptrace.NewSpan() span.SetName("my-topic send") span.SetTraceID(newTraceID()) span.SetSpanID(newSegmentID()) - span.SetParentSpanID(pdata.InvalidSpanID()) - span.SetKind(pdata.SpanKindProducer) - span.SetStartTimestamp(pdata.NewTimestampFromTime(time.Now())) - span.SetEndTimestamp(pdata.NewTimestampFromTime(time.Now().Add(10))) - resource := pdata.NewResource() + span.SetParentSpanID(pcommon.InvalidSpanID()) + span.SetKind(ptrace.SpanKindProducer) + span.SetStartTimestamp(pcommon.NewTimestampFromTime(time.Now())) + span.SetEndTimestamp(pcommon.NewTimestampFromTime(time.Now().Add(10))) + resource := pcommon.NewResource() segment, _ := MakeSegment(span, resource, nil, false) assert.Empty(t, segment.ParentID) @@ -212,15 +213,15 @@ func TestSpanNoParentId(t *testing.T) { } func TestSpanWithNoStatus(t *testing.T) { - span := pdata.NewSpan() + span := ptrace.NewSpan() span.SetTraceID(newTraceID()) span.SetSpanID(newSegmentID()) span.SetParentSpanID(newSegmentID()) - span.SetKind(pdata.SpanKindServer) - span.SetStartTimestamp(pdata.NewTimestampFromTime(time.Now())) - span.SetEndTimestamp(pdata.NewTimestampFromTime(time.Now().Add(10))) + span.SetKind(ptrace.SpanKindServer) + span.SetStartTimestamp(pcommon.NewTimestampFromTime(time.Now())) + span.SetEndTimestamp(pcommon.NewTimestampFromTime(time.Now().Add(10))) - resource := pdata.NewResource() + resource := pcommon.NewResource() segment, _ := MakeSegment(span, resource, nil, false) assert.NotNil(t, segment) } @@ -239,7 +240,7 @@ func TestClientSpanWithDbComponent(t *testing.T) { attributes[conventions.AttributeNetPeerPort] = "3306" attributes["enterprise.app.id"] = enterpriseAppID resource := constructDefaultResource() - span := constructClientSpan(parentSpanID, spanName, pdata.StatusCodeUnset, "OK", attributes) + span := constructClientSpan(parentSpanID, spanName, ptrace.StatusCodeUnset, "OK", attributes) segment, _ := MakeSegment(span, resource, nil, false) @@ -280,7 +281,7 @@ func 
TestClientSpanWithHttpHost(t *testing.T) { attributes[conventions.AttributeHTTPHost] = "foo.com" attributes[conventions.AttributeNetPeerName] = "bar.com" resource := constructDefaultResource() - span := constructClientSpan(parentSpanID, spanName, pdata.StatusCodeUnset, "OK", attributes) + span := constructClientSpan(parentSpanID, spanName, ptrace.StatusCodeUnset, "OK", attributes) segment, _ := MakeSegment(span, resource, nil, false) @@ -299,7 +300,7 @@ func TestClientSpanWithoutHttpHost(t *testing.T) { attributes[conventions.AttributeHTTPTarget] = "/" attributes[conventions.AttributeNetPeerName] = "bar.com" resource := constructDefaultResource() - span := constructClientSpan(parentSpanID, spanName, pdata.StatusCodeUnset, "OK", attributes) + span := constructClientSpan(parentSpanID, spanName, ptrace.StatusCodeUnset, "OK", attributes) segment, _ := MakeSegment(span, resource, nil, false) @@ -319,7 +320,7 @@ func TestClientSpanWithRpcHost(t *testing.T) { attributes[conventions.AttributeRPCService] = "com.foo.AnimalService" attributes[conventions.AttributeNetPeerName] = "bar.com" resource := constructDefaultResource() - span := constructClientSpan(parentSpanID, spanName, pdata.StatusCodeUnset, "OK", attributes) + span := constructClientSpan(parentSpanID, spanName, ptrace.StatusCodeUnset, "OK", attributes) segment, _ := MakeSegment(span, resource, nil, false) @@ -336,12 +337,12 @@ func TestSpanWithInvalidTraceId(t *testing.T) { attributes[conventions.AttributeNetPeerPort] = "9443" attributes[conventions.AttributeHTTPTarget] = spanName resource := constructDefaultResource() - span := constructClientSpan(pdata.InvalidSpanID(), spanName, pdata.StatusCodeUnset, "OK", attributes) + span := constructClientSpan(pcommon.InvalidSpanID(), spanName, ptrace.StatusCodeUnset, "OK", attributes) timeEvents := constructTimedEventsWithSentMessageEvent(span.StartTimestamp()) timeEvents.CopyTo(span.Events()) traceID := span.TraceID().Bytes() traceID[0] = 0x11 - span.SetTraceID(pdata.NewTraceID(traceID)) + span.SetTraceID(pcommon.NewTraceID(traceID)) _, err := MakeSegmentDocumentString(span, resource, nil, false) @@ -356,7 +357,7 @@ func TestSpanWithExpiredTraceId(t *testing.T) { tempTraceID := newTraceID().Bytes() binary.BigEndian.PutUint32(tempTraceID[0:4], uint32(ExpiredEpoch)) - _, err := convertToAmazonTraceID(pdata.NewTraceID(tempTraceID)) + _, err := convertToAmazonTraceID(pcommon.NewTraceID(tempTraceID)) assert.NotNil(t, err) } @@ -386,10 +387,10 @@ func TestServerSpanWithNilAttributes(t *testing.T) { parentSpanID := newSegmentID() attributes := make(map[string]interface{}) resource := constructDefaultResource() - span := constructServerSpan(parentSpanID, spanName, pdata.StatusCodeError, "OK", attributes) + span := constructServerSpan(parentSpanID, spanName, ptrace.StatusCodeError, "OK", attributes) timeEvents := constructTimedEventsWithSentMessageEvent(span.StartTimestamp()) timeEvents.CopyTo(span.Events()) - pdata.NewMap().CopyTo(span.Attributes()) + pcommon.NewMap().CopyTo(span.Attributes()) segment, _ := MakeSegment(span, resource, nil, false) @@ -406,7 +407,7 @@ func TestSpanWithAttributesDefaultNotIndexed(t *testing.T) { attributes["attr1@1"] = "val1" attributes["attr2@2"] = "val2" resource := constructDefaultResource() - span := constructServerSpan(parentSpanID, spanName, pdata.StatusCodeError, "OK", attributes) + span := constructServerSpan(parentSpanID, spanName, ptrace.StatusCodeError, "OK", attributes) segment, _ := MakeSegment(span, resource, nil, false) @@ -433,7 +434,7 @@ func 
TestSpanWithResourceNotStoredIfSubsegment(t *testing.T) { attributes["attr1@1"] = "val1" attributes["attr2@2"] = "val2" resource := constructDefaultResource() - span := constructClientSpan(parentSpanID, spanName, pdata.StatusCodeError, "ERROR", attributes) + span := constructClientSpan(parentSpanID, spanName, ptrace.StatusCodeError, "ERROR", attributes) segment, _ := MakeSegment(span, resource, nil, false) @@ -456,7 +457,7 @@ func TestSpanWithAttributesPartlyIndexed(t *testing.T) { attributes["attr1@1"] = "val1" attributes["attr2@2"] = "val2" resource := constructDefaultResource() - span := constructServerSpan(parentSpanID, spanName, pdata.StatusCodeError, "OK", attributes) + span := constructServerSpan(parentSpanID, spanName, ptrace.StatusCodeError, "OK", attributes) segment, _ := MakeSegment(span, resource, []string{"attr1@1", "not_exist"}, false) @@ -473,7 +474,7 @@ func TestSpanWithAttributesAllIndexed(t *testing.T) { attributes["attr1@1"] = "val1" attributes["attr2@2"] = "val2" resource := constructDefaultResource() - span := constructServerSpan(parentSpanID, spanName, pdata.StatusCodeOk, "OK", attributes) + span := constructServerSpan(parentSpanID, spanName, ptrace.StatusCodeOk, "OK", attributes) segment, _ := MakeSegment(span, resource, []string{"attr1@1", "not_exist"}, true) @@ -487,7 +488,7 @@ func TestResourceAttributesCanBeIndexed(t *testing.T) { parentSpanID := newSegmentID() attributes := make(map[string]interface{}) resource := constructDefaultResource() - span := constructServerSpan(parentSpanID, spanName, pdata.StatusCodeError, "OK", attributes) + span := constructServerSpan(parentSpanID, spanName, ptrace.StatusCodeError, "OK", attributes) segment, _ := MakeSegment(span, resource, []string{ "otel.resource.string.key", @@ -519,7 +520,7 @@ func TestResourceAttributesNotIndexedIfSubsegment(t *testing.T) { parentSpanID := newSegmentID() attributes := make(map[string]interface{}) resource := constructDefaultResource() - span := constructClientSpan(parentSpanID, spanName, pdata.StatusCodeError, "OK", attributes) + span := constructClientSpan(parentSpanID, spanName, ptrace.StatusCodeError, "OK", attributes) segment, _ := MakeSegment(span, resource, []string{ "otel.resource.string.key", @@ -539,12 +540,12 @@ func TestOriginNotAws(t *testing.T) { spanName := "/test" parentSpanID := newSegmentID() attributes := make(map[string]interface{}) - resource := pdata.NewResource() - attrs := pdata.NewMap() + resource := pcommon.NewResource() + attrs := pcommon.NewMap() attrs.InsertString(conventions.AttributeCloudProvider, conventions.AttributeCloudProviderGCP) attrs.InsertString(conventions.AttributeHostID, "instance-123") attrs.CopyTo(resource.Attributes()) - span := constructServerSpan(parentSpanID, spanName, pdata.StatusCodeError, "OK", attributes) + span := constructServerSpan(parentSpanID, spanName, ptrace.StatusCodeError, "OK", attributes) segment, _ := MakeSegment(span, resource, []string{}, false) @@ -556,13 +557,13 @@ func TestOriginEc2(t *testing.T) { spanName := "/test" parentSpanID := newSegmentID() attributes := make(map[string]interface{}) - resource := pdata.NewResource() - attrs := pdata.NewMap() + resource := pcommon.NewResource() + attrs := pcommon.NewMap() attrs.InsertString(conventions.AttributeCloudProvider, conventions.AttributeCloudProviderAWS) attrs.InsertString(conventions.AttributeCloudPlatform, conventions.AttributeCloudPlatformAWSEC2) attrs.InsertString(conventions.AttributeHostID, "instance-123") attrs.CopyTo(resource.Attributes()) - span := 
constructServerSpan(parentSpanID, spanName, pdata.StatusCodeError, "OK", attributes) + span := constructServerSpan(parentSpanID, spanName, ptrace.StatusCodeError, "OK", attributes) segment, _ := MakeSegment(span, resource, []string{}, false) @@ -574,14 +575,14 @@ func TestOriginEcs(t *testing.T) { spanName := "/test" parentSpanID := newSegmentID() attributes := make(map[string]interface{}) - resource := pdata.NewResource() - attrs := pdata.NewMap() + resource := pcommon.NewResource() + attrs := pcommon.NewMap() attrs.InsertString(conventions.AttributeCloudProvider, conventions.AttributeCloudProviderAWS) attrs.InsertString(conventions.AttributeCloudPlatform, conventions.AttributeCloudPlatformAWSECS) attrs.InsertString(conventions.AttributeHostID, "instance-123") attrs.InsertString(conventions.AttributeContainerName, "container-123") attrs.CopyTo(resource.Attributes()) - span := constructServerSpan(parentSpanID, spanName, pdata.StatusCodeError, "OK", attributes) + span := constructServerSpan(parentSpanID, spanName, ptrace.StatusCodeError, "OK", attributes) segment, _ := MakeSegment(span, resource, []string{}, false) @@ -593,15 +594,15 @@ func TestOriginEcsEc2(t *testing.T) { spanName := "/test" parentSpanID := newSegmentID() attributes := make(map[string]interface{}) - resource := pdata.NewResource() - attrs := pdata.NewMap() + resource := pcommon.NewResource() + attrs := pcommon.NewMap() attrs.InsertString(conventions.AttributeCloudProvider, conventions.AttributeCloudProviderAWS) attrs.InsertString(conventions.AttributeCloudPlatform, conventions.AttributeCloudPlatformAWSECS) attrs.InsertString(conventions.AttributeAWSECSLaunchtype, conventions.AttributeAWSECSLaunchtypeEC2) attrs.InsertString(conventions.AttributeHostID, "instance-123") attrs.InsertString(conventions.AttributeContainerName, "container-123") attrs.CopyTo(resource.Attributes()) - span := constructServerSpan(parentSpanID, spanName, pdata.StatusCodeError, "OK", attributes) + span := constructServerSpan(parentSpanID, spanName, ptrace.StatusCodeError, "OK", attributes) segment, _ := MakeSegment(span, resource, []string{}, false) @@ -613,15 +614,15 @@ func TestOriginEcsFargate(t *testing.T) { spanName := "/test" parentSpanID := newSegmentID() attributes := make(map[string]interface{}) - resource := pdata.NewResource() - attrs := pdata.NewMap() + resource := pcommon.NewResource() + attrs := pcommon.NewMap() attrs.InsertString(conventions.AttributeCloudProvider, conventions.AttributeCloudProviderAWS) attrs.InsertString(conventions.AttributeCloudPlatform, conventions.AttributeCloudPlatformAWSECS) attrs.InsertString(conventions.AttributeAWSECSLaunchtype, conventions.AttributeAWSECSLaunchtypeFargate) attrs.InsertString(conventions.AttributeHostID, "instance-123") attrs.InsertString(conventions.AttributeContainerName, "container-123") attrs.CopyTo(resource.Attributes()) - span := constructServerSpan(parentSpanID, spanName, pdata.StatusCodeError, "OK", attributes) + span := constructServerSpan(parentSpanID, spanName, ptrace.StatusCodeError, "OK", attributes) segment, _ := MakeSegment(span, resource, []string{}, false) @@ -633,15 +634,15 @@ func TestOriginEb(t *testing.T) { spanName := "/test" parentSpanID := newSegmentID() attributes := make(map[string]interface{}) - resource := pdata.NewResource() - attrs := pdata.NewMap() + resource := pcommon.NewResource() + attrs := pcommon.NewMap() attrs.InsertString(conventions.AttributeCloudProvider, conventions.AttributeCloudProviderAWS) attrs.InsertString(conventions.AttributeCloudPlatform, 
conventions.AttributeCloudPlatformAWSElasticBeanstalk) attrs.InsertString(conventions.AttributeHostID, "instance-123") attrs.InsertString(conventions.AttributeContainerName, "container-123") attrs.InsertString(conventions.AttributeServiceInstanceID, "service-123") attrs.CopyTo(resource.Attributes()) - span := constructServerSpan(parentSpanID, spanName, pdata.StatusCodeError, "OK", attributes) + span := constructServerSpan(parentSpanID, spanName, ptrace.StatusCodeError, "OK", attributes) segment, _ := MakeSegment(span, resource, []string{}, false) @@ -656,8 +657,8 @@ func TestOriginEks(t *testing.T) { spanName := "/test" parentSpanID := newSegmentID() attributes := make(map[string]interface{}) - resource := pdata.NewResource() - attrs := pdata.NewMap() + resource := pcommon.NewResource() + attrs := pcommon.NewMap() attrs.InsertString(conventions.AttributeCloudProvider, conventions.AttributeCloudProviderAWS) attrs.InsertString(conventions.AttributeCloudPlatform, conventions.AttributeCloudPlatformAWSEKS) attrs.InsertString(conventions.AttributeCloudAccountID, "123456789") @@ -673,7 +674,7 @@ func TestOriginEks(t *testing.T) { attrs.InsertString(conventions.AttributeHostID, instanceID) attrs.InsertString(conventions.AttributeHostType, "m5.xlarge") attrs.CopyTo(resource.Attributes()) - span := constructServerSpan(parentSpanID, spanName, pdata.StatusCodeError, "OK", attributes) + span := constructServerSpan(parentSpanID, spanName, ptrace.StatusCodeError, "OK", attributes) segment, _ := MakeSegment(span, resource, []string{}, false) @@ -685,12 +686,12 @@ func TestOriginAppRunner(t *testing.T) { spanName := "/test" parentSpanID := newSegmentID() attributes := make(map[string]interface{}) - resource := pdata.NewResource() - attrs := pdata.NewMap() + resource := pcommon.NewResource() + attrs := pcommon.NewMap() attrs.InsertString(conventions.AttributeCloudProvider, conventions.AttributeCloudProviderAWS) attrs.InsertString(conventions.AttributeCloudPlatform, conventions.AttributeCloudPlatformAWSAppRunner) attrs.CopyTo(resource.Attributes()) - span := constructServerSpan(parentSpanID, spanName, pdata.StatusCodeError, "OK", attributes) + span := constructServerSpan(parentSpanID, spanName, ptrace.StatusCodeError, "OK", attributes) segment, _ := MakeSegment(span, resource, []string{}, false) @@ -702,11 +703,11 @@ func TestOriginBlank(t *testing.T) { spanName := "/test" parentSpanID := newSegmentID() attributes := make(map[string]interface{}) - resource := pdata.NewResource() - attrs := pdata.NewMap() + resource := pcommon.NewResource() + attrs := pcommon.NewMap() attrs.InsertString(conventions.AttributeCloudProvider, conventions.AttributeCloudProviderAWS) attrs.CopyTo(resource.Attributes()) - span := constructServerSpan(parentSpanID, spanName, pdata.StatusCodeError, "OK", attributes) + span := constructServerSpan(parentSpanID, spanName, ptrace.StatusCodeError, "OK", attributes) segment, _ := MakeSegment(span, resource, []string{}, false) @@ -718,8 +719,8 @@ func TestOriginPrefersInfraService(t *testing.T) { spanName := "/test" parentSpanID := newSegmentID() attributes := make(map[string]interface{}) - resource := pdata.NewResource() - attrs := pdata.NewMap() + resource := pcommon.NewResource() + attrs := pcommon.NewMap() attrs.InsertString(conventions.AttributeCloudProvider, conventions.AttributeCloudProviderAWS) attrs.InsertString(conventions.AttributeCloudPlatform, conventions.AttributeCloudPlatformAWSEC2) attrs.InsertString(conventions.AttributeK8SClusterName, "cluster-123") @@ -727,7 +728,7 @@ func 
TestOriginPrefersInfraService(t *testing.T) { attrs.InsertString(conventions.AttributeContainerName, "container-123") attrs.InsertString(conventions.AttributeServiceInstanceID, "service-123") attrs.CopyTo(resource.Attributes()) - span := constructServerSpan(parentSpanID, spanName, pdata.StatusCodeError, "OK", attributes) + span := constructServerSpan(parentSpanID, spanName, ptrace.StatusCodeError, "OK", attributes) segment, _ := MakeSegment(span, resource, []string{}, false) @@ -739,27 +740,27 @@ func TestFilteredAttributesMetadata(t *testing.T) { spanName := "/test" parentSpanID := newSegmentID() attributes := make(map[string]interface{}) - resource := pdata.NewResource() + resource := pcommon.NewResource() - attrs := pdata.NewMap() + attrs := pcommon.NewMap() attrs.InsertString("string_value", "value") attrs.InsertInt("int_value", 123) attrs.InsertDouble("float_value", 456.78) attrs.InsertBool("bool_value", false) attrs.InsertNull("null_value") - arrayValue := pdata.NewValueSlice() + arrayValue := pcommon.NewValueSlice() arrayValue.SliceVal().AppendEmpty().SetIntVal(12) arrayValue.SliceVal().AppendEmpty().SetIntVal(34) arrayValue.SliceVal().AppendEmpty().SetIntVal(56) attrs.Insert("array_value", arrayValue) - mapValue := pdata.NewValueMap() + mapValue := pcommon.NewValueMap() mapValue.MapVal().InsertDouble("value1", -987.65) mapValue.MapVal().InsertBool("value2", true) attrs.Insert("map_value", mapValue) - span := constructServerSpan(parentSpanID, spanName, pdata.StatusCodeError, "OK", attributes) + span := constructServerSpan(parentSpanID, spanName, ptrace.StatusCodeError, "OK", attributes) attrs.CopyTo(span.Attributes()) segment, _ := MakeSegment(span, resource, []string{}, false) @@ -777,7 +778,7 @@ func TestFilteredAttributesMetadata(t *testing.T) { }, segment.Metadata["default"]["map_value"]) } -func constructClientSpan(parentSpanID pdata.SpanID, name string, code pdata.StatusCode, message string, attributes map[string]interface{}) pdata.Span { +func constructClientSpan(parentSpanID pcommon.SpanID, name string, code ptrace.StatusCode, message string, attributes map[string]interface{}) ptrace.Span { var ( traceID = newTraceID() spanID = newSegmentID() @@ -786,16 +787,16 @@ func constructClientSpan(parentSpanID pdata.SpanID, name string, code pdata.Stat spanAttributes = constructSpanAttributes(attributes) ) - span := pdata.NewSpan() + span := ptrace.NewSpan() span.SetTraceID(traceID) span.SetSpanID(spanID) span.SetParentSpanID(parentSpanID) span.SetName(name) - span.SetKind(pdata.SpanKindClient) - span.SetStartTimestamp(pdata.NewTimestampFromTime(startTime)) - span.SetEndTimestamp(pdata.NewTimestampFromTime(endTime)) + span.SetKind(ptrace.SpanKindClient) + span.SetStartTimestamp(pcommon.NewTimestampFromTime(startTime)) + span.SetEndTimestamp(pcommon.NewTimestampFromTime(endTime)) - status := pdata.NewSpanStatus() + status := ptrace.NewSpanStatus() status.SetCode(code) status.SetMessage(message) status.CopyTo(span.Status()) @@ -804,7 +805,7 @@ func constructClientSpan(parentSpanID pdata.SpanID, name string, code pdata.Stat return span } -func constructServerSpan(parentSpanID pdata.SpanID, name string, code pdata.StatusCode, message string, attributes map[string]interface{}) pdata.Span { +func constructServerSpan(parentSpanID pcommon.SpanID, name string, code ptrace.StatusCode, message string, attributes map[string]interface{}) ptrace.Span { var ( traceID = newTraceID() spanID = newSegmentID() @@ -813,16 +814,16 @@ func constructServerSpan(parentSpanID pdata.SpanID, name string, code 
pdata.Stat spanAttributes = constructSpanAttributes(attributes) ) - span := pdata.NewSpan() + span := ptrace.NewSpan() span.SetTraceID(traceID) span.SetSpanID(spanID) span.SetParentSpanID(parentSpanID) span.SetName(name) - span.SetKind(pdata.SpanKindServer) - span.SetStartTimestamp(pdata.NewTimestampFromTime(startTime)) - span.SetEndTimestamp(pdata.NewTimestampFromTime(endTime)) + span.SetKind(ptrace.SpanKindServer) + span.SetStartTimestamp(pcommon.NewTimestampFromTime(startTime)) + span.SetEndTimestamp(pcommon.NewTimestampFromTime(endTime)) - status := pdata.NewSpanStatus() + status := ptrace.NewSpanStatus() status.SetCode(code) status.SetMessage(message) status.CopyTo(span.Status()) @@ -831,8 +832,8 @@ func constructServerSpan(parentSpanID pdata.SpanID, name string, code pdata.Stat return span } -func constructSpanAttributes(attributes map[string]interface{}) pdata.Map { - attrs := pdata.NewMap() +func constructSpanAttributes(attributes map[string]interface{}) pcommon.Map { + attrs := pcommon.NewMap() for key, value := range attributes { if cast, ok := value.(int); ok { attrs.InsertInt(key, int64(cast)) @@ -845,9 +846,9 @@ func constructSpanAttributes(attributes map[string]interface{}) pdata.Map { return attrs } -func constructDefaultResource() pdata.Resource { - resource := pdata.NewResource() - attrs := pdata.NewMap() +func constructDefaultResource() pcommon.Resource { + resource := pcommon.NewResource() + attrs := pcommon.NewMap() attrs.InsertString(conventions.AttributeServiceName, "signup_aggregator") attrs.InsertString(conventions.AttributeServiceVersion, "semver:1.1.4") attrs.InsertString(conventions.AttributeContainerName, "signup_aggregator") @@ -866,13 +867,13 @@ func constructDefaultResource() pdata.Resource { attrs.InsertDouble(resourceDoubleKey, 5.0) attrs.InsertBool(resourceBoolKey, true) - resourceMapVal := pdata.NewValueMap() + resourceMapVal := pcommon.NewValueMap() resourceMap := resourceMapVal.MapVal() resourceMap.InsertInt("key1", 1) resourceMap.InsertString("key2", "value") attrs.Insert(resourceMapKey, resourceMapVal) - resourceArrayVal := pdata.NewValueSlice() + resourceArrayVal := pcommon.NewValueSlice() resourceArray := resourceArrayVal.SliceVal() resourceArray.AppendEmpty().SetStringVal("foo") resourceArray.AppendEmpty().SetStringVal("bar") @@ -881,41 +882,41 @@ func constructDefaultResource() pdata.Resource { return resource } -func constructTimedEventsWithReceivedMessageEvent(tm pdata.Timestamp) pdata.SpanEventSlice { - eventAttr := pdata.NewMap() +func constructTimedEventsWithReceivedMessageEvent(tm pcommon.Timestamp) ptrace.SpanEventSlice { + eventAttr := pcommon.NewMap() eventAttr.InsertString("message.type", "RECEIVED") eventAttr.InsertInt(conventions.AttributeMessagingMessageID, 1) eventAttr.InsertInt(conventions.AttributeMessagingMessagePayloadCompressedSizeBytes, 6478) eventAttr.InsertInt(conventions.AttributeMessagingMessagePayloadSizeBytes, 12452) - event := pdata.NewSpanEvent() + event := ptrace.NewSpanEvent() event.SetTimestamp(tm) eventAttr.CopyTo(event.Attributes()) event.SetDroppedAttributesCount(0) - events := pdata.NewSpanEventSlice() + events := ptrace.NewSpanEventSlice() event.CopyTo(events.AppendEmpty()) return events } -func constructTimedEventsWithSentMessageEvent(tm pdata.Timestamp) pdata.SpanEventSlice { - eventAttr := pdata.NewMap() +func constructTimedEventsWithSentMessageEvent(tm pcommon.Timestamp) ptrace.SpanEventSlice { + eventAttr := pcommon.NewMap() eventAttr.InsertString("message.type", "SENT") 
eventAttr.InsertInt(conventions.AttributeMessagingMessageID, 1) eventAttr.InsertInt(conventions.AttributeMessagingMessagePayloadSizeBytes, 7480) - event := pdata.NewSpanEvent() + event := ptrace.NewSpanEvent() event.SetTimestamp(tm) eventAttr.CopyTo(event.Attributes()) event.SetDroppedAttributesCount(0) - events := pdata.NewSpanEventSlice() + events := ptrace.NewSpanEventSlice() event.CopyTo(events.AppendEmpty()) return events } // newTraceID generates a new valid X-Ray TraceID -func newTraceID() pdata.TraceID { +func newTraceID() pcommon.TraceID { var r [16]byte epoch := time.Now().Unix() binary.BigEndian.PutUint32(r[0:4], uint32(epoch)) @@ -923,5 +924,5 @@ func newTraceID() pdata.TraceID { if err != nil { panic(err) } - return pdata.NewTraceID(r) + return pcommon.NewTraceID(r) } diff --git a/exporter/awsxrayexporter/internal/translator/service.go b/exporter/awsxrayexporter/internal/translator/service.go index a452529b2593..e3b7e233c193 100644 --- a/exporter/awsxrayexporter/internal/translator/service.go +++ b/exporter/awsxrayexporter/internal/translator/service.go @@ -15,13 +15,13 @@ package translator // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter/internal/translator" import ( - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" awsxray "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray" ) -func makeService(resource pdata.Resource) *awsxray.ServiceData { +func makeService(resource pcommon.Resource) *awsxray.ServiceData { var service *awsxray.ServiceData verStr, ok := resource.Attributes().Get(conventions.AttributeServiceVersion) diff --git a/exporter/awsxrayexporter/internal/translator/service_test.go b/exporter/awsxrayexporter/internal/translator/service_test.go index a610158aee29..121464e12696 100644 --- a/exporter/awsxrayexporter/internal/translator/service_test.go +++ b/exporter/awsxrayexporter/internal/translator/service_test.go @@ -19,8 +19,8 @@ import ( "testing" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" ) func TestServiceFromResource(t *testing.T) { @@ -54,7 +54,7 @@ func TestServiceFromResourceWithNoServiceVersion(t *testing.T) { } func TestServiceFromNullResource(t *testing.T) { - service := makeService(pdata.NewResource()) + service := makeService(pcommon.NewResource()) assert.Nil(t, service) } diff --git a/exporter/awsxrayexporter/internal/translator/sql.go b/exporter/awsxrayexporter/internal/translator/sql.go index 05c323ec0282..9e3e11011ab8 100644 --- a/exporter/awsxrayexporter/internal/translator/sql.go +++ b/exporter/awsxrayexporter/internal/translator/sql.go @@ -15,15 +15,15 @@ package translator // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/awsxrayexporter/internal/translator" import ( - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" awsxray "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray" ) -func makeSQL(attributes map[string]pdata.Value) (map[string]pdata.Value, *awsxray.SQLData) { +func makeSQL(attributes map[string]pcommon.Value) (map[string]pcommon.Value, *awsxray.SQLData) { var ( - filtered = make(map[string]pdata.Value) + 
filtered = make(map[string]pcommon.Value) sqlData awsxray.SQLData dbURL string dbSystem string diff --git a/exporter/awsxrayexporter/internal/translator/sql_test.go b/exporter/awsxrayexporter/internal/translator/sql_test.go index d864e474aced..b7a97cf41d2a 100644 --- a/exporter/awsxrayexporter/internal/translator/sql_test.go +++ b/exporter/awsxrayexporter/internal/translator/sql_test.go @@ -19,19 +19,19 @@ import ( "testing" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" ) func TestClientSpanWithStatementAttribute(t *testing.T) { - attributes := make(map[string]pdata.Value) - attributes[conventions.AttributeDBSystem] = pdata.NewValueString("mysql") - attributes[conventions.AttributeDBName] = pdata.NewValueString("customers") - attributes[conventions.AttributeDBStatement] = pdata.NewValueString("SELECT * FROM user WHERE user_id = ?") - attributes[conventions.AttributeDBUser] = pdata.NewValueString("readonly_user") - attributes[conventions.AttributeDBConnectionString] = pdata.NewValueString("mysql://db.example.com:3306") - attributes[conventions.AttributeNetPeerName] = pdata.NewValueString("db.example.com") - attributes[conventions.AttributeNetPeerPort] = pdata.NewValueString("3306") + attributes := make(map[string]pcommon.Value) + attributes[conventions.AttributeDBSystem] = pcommon.NewValueString("mysql") + attributes[conventions.AttributeDBName] = pcommon.NewValueString("customers") + attributes[conventions.AttributeDBStatement] = pcommon.NewValueString("SELECT * FROM user WHERE user_id = ?") + attributes[conventions.AttributeDBUser] = pcommon.NewValueString("readonly_user") + attributes[conventions.AttributeDBConnectionString] = pcommon.NewValueString("mysql://db.example.com:3306") + attributes[conventions.AttributeNetPeerName] = pcommon.NewValueString("db.example.com") + attributes[conventions.AttributeNetPeerPort] = pcommon.NewValueString("3306") filtered, sqlData := makeSQL(attributes) @@ -48,14 +48,14 @@ func TestClientSpanWithStatementAttribute(t *testing.T) { } func TestClientSpanWithNonSQLDatabase(t *testing.T) { - attributes := make(map[string]pdata.Value) - attributes[conventions.AttributeDBSystem] = pdata.NewValueString("redis") - attributes[conventions.AttributeDBName] = pdata.NewValueString("0") - attributes[conventions.AttributeDBStatement] = pdata.NewValueString("SET key value") - attributes[conventions.AttributeDBUser] = pdata.NewValueString("readonly_user") - attributes[conventions.AttributeDBConnectionString] = pdata.NewValueString("redis://db.example.com:3306") - attributes[conventions.AttributeNetPeerName] = pdata.NewValueString("db.example.com") - attributes[conventions.AttributeNetPeerPort] = pdata.NewValueString("3306") + attributes := make(map[string]pcommon.Value) + attributes[conventions.AttributeDBSystem] = pcommon.NewValueString("redis") + attributes[conventions.AttributeDBName] = pcommon.NewValueString("0") + attributes[conventions.AttributeDBStatement] = pcommon.NewValueString("SET key value") + attributes[conventions.AttributeDBUser] = pcommon.NewValueString("readonly_user") + attributes[conventions.AttributeDBConnectionString] = pcommon.NewValueString("redis://db.example.com:3306") + attributes[conventions.AttributeNetPeerName] = pcommon.NewValueString("db.example.com") + attributes[conventions.AttributeNetPeerPort] = pcommon.NewValueString("3306") filtered, sqlData := makeSQL(attributes) assert.Nil(t, 
sqlData) @@ -63,14 +63,14 @@ func TestClientSpanWithNonSQLDatabase(t *testing.T) { } func TestClientSpanWithoutDBurlAttribute(t *testing.T) { - attributes := make(map[string]pdata.Value) - attributes[conventions.AttributeDBSystem] = pdata.NewValueString("postgresql") - attributes[conventions.AttributeDBName] = pdata.NewValueString("customers") - attributes[conventions.AttributeDBStatement] = pdata.NewValueString("SELECT * FROM user WHERE user_id = ?") - attributes[conventions.AttributeDBUser] = pdata.NewValueString("readonly_user") - attributes[conventions.AttributeDBConnectionString] = pdata.NewValueString("") - attributes[conventions.AttributeNetPeerName] = pdata.NewValueString("db.example.com") - attributes[conventions.AttributeNetPeerPort] = pdata.NewValueString("3306") + attributes := make(map[string]pcommon.Value) + attributes[conventions.AttributeDBSystem] = pcommon.NewValueString("postgresql") + attributes[conventions.AttributeDBName] = pcommon.NewValueString("customers") + attributes[conventions.AttributeDBStatement] = pcommon.NewValueString("SELECT * FROM user WHERE user_id = ?") + attributes[conventions.AttributeDBUser] = pcommon.NewValueString("readonly_user") + attributes[conventions.AttributeDBConnectionString] = pcommon.NewValueString("") + attributes[conventions.AttributeNetPeerName] = pcommon.NewValueString("db.example.com") + attributes[conventions.AttributeNetPeerPort] = pcommon.NewValueString("3306") filtered, sqlData := makeSQL(attributes) assert.NotNil(t, filtered) assert.NotNil(t, sqlData) diff --git a/exporter/awsxrayexporter/internal/translator/writer_pool_test.go b/exporter/awsxrayexporter/internal/translator/writer_pool_test.go index 6d7d80502798..31e7958cd6b3 100644 --- a/exporter/awsxrayexporter/internal/translator/writer_pool_test.go +++ b/exporter/awsxrayexporter/internal/translator/writer_pool_test.go @@ -20,8 +20,9 @@ import ( "testing" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap" ) @@ -35,7 +36,7 @@ func TestWriterPoolBasic(t *testing.T) { assert.NotNil(t, w.encoder) assert.Equal(t, size, w.buffer.Cap()) assert.Equal(t, 0, w.buffer.Len()) - resource := pdata.NewResource() + resource := pcommon.NewResource() segment, _ := MakeSegment(span, resource, nil, false) if err := w.Encode(*segment); err != nil { assert.Fail(t, "invalid json") @@ -53,7 +54,7 @@ func BenchmarkWithoutPool(b *testing.B) { b.StartTimer() buffer := bytes.NewBuffer(make([]byte, 0, 2048)) encoder := json.NewEncoder(buffer) - segment, _ := MakeSegment(span, pdata.NewResource(), nil, false) + segment, _ := MakeSegment(span, pcommon.NewResource(), nil, false) encoder.Encode(*segment) logger.Info(buffer.String()) } @@ -67,13 +68,13 @@ func BenchmarkWithPool(b *testing.B) { span := constructWriterPoolSpan() b.StartTimer() w := wp.borrow() - segment, _ := MakeSegment(span, pdata.NewResource(), nil, false) + segment, _ := MakeSegment(span, pcommon.NewResource(), nil, false) w.Encode(*segment) logger.Info(w.String()) } } -func constructWriterPoolSpan() pdata.Span { +func constructWriterPoolSpan() ptrace.Span { attributes := make(map[string]interface{}) attributes[conventions.AttributeHTTPMethod] = "GET" attributes[conventions.AttributeHTTPURL] = "https://api.example.com/users/junit" diff --git a/exporter/azuremonitorexporter/conventions.go 
b/exporter/azuremonitorexporter/conventions.go
index 27dd4a5122a6..b647c476f4c0 100644
--- a/exporter/azuremonitorexporter/conventions.go
+++ b/exporter/azuremonitorexporter/conventions.go
@@ -17,8 +17,8 @@ package azuremonitorexporter // import "github.com/open-telemetry/opentelemetry-
import (
    "strconv"
-    "go.opentelemetry.io/collector/model/pdata"
    conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1"
+    "go.opentelemetry.io/collector/pdata/pcommon"
)
/*
@@ -45,7 +45,7 @@ type NetworkAttributes struct {
}
// MapAttribute attempts to map a Span attribute to one of the known types
-func (attrs *NetworkAttributes) MapAttribute(k string, v pdata.Value) bool {
+func (attrs *NetworkAttributes) MapAttribute(k string, v pcommon.Value) bool {
    switch k {
    case conventions.AttributeNetTransport:
        attrs.NetTransport = v.StringVal()
@@ -98,7 +98,7 @@ type HTTPAttributes struct {
}
// MapAttribute attempts to map a Span attribute to one of the known types
-func (attrs *HTTPAttributes) MapAttribute(k string, v pdata.Value) bool {
+func (attrs *HTTPAttributes) MapAttribute(k string, v pcommon.Value) bool {
    switch k {
    case conventions.AttributeHTTPMethod:
        attrs.HTTPMethod = v.StringVal()
@@ -161,7 +161,7 @@ type RPCAttributes struct {
}
// MapAttribute attempts to map a Span attribute to one of the known types
-func (attrs *RPCAttributes) MapAttribute(k string, v pdata.Value) bool {
+func (attrs *RPCAttributes) MapAttribute(k string, v pcommon.Value) bool {
    switch k {
    case conventions.AttributeRPCSystem:
        attrs.RPCSystem = v.StringVal()
@@ -196,7 +196,7 @@ type DatabaseAttributes struct {
}
// MapAttribute attempts to map a Span attribute to one of the known types
-func (attrs *DatabaseAttributes) MapAttribute(k string, v pdata.Value) bool {
+func (attrs *DatabaseAttributes) MapAttribute(k string, v pcommon.Value) bool {
    switch k {
    case conventions.AttributeDBSystem:
        attrs.DBSystem = v.StringVal()
@@ -245,7 +245,7 @@ type MessagingAttributes struct {
}
// MapAttribute attempts to map a Span attribute to one of the known types
-func (attrs *MessagingAttributes) MapAttribute(k string, v pdata.Value) bool {
+func (attrs *MessagingAttributes) MapAttribute(k string, v pcommon.Value) bool {
    switch k {
    case conventions.AttributeMessagingSystem:
        attrs.MessagingSystem = v.StringVal()
@@ -283,14 +283,14 @@ func (attrs *MessagingAttributes) MapAttribute(k string, v pdata.Value) bool {
}
// Tries to return the value of the attribute as an int64
-func getAttributeValueAsInt(attributeValue pdata.Value) (int64, error) {
+func getAttributeValueAsInt(attributeValue pcommon.Value) (int64, error) {
    switch attributeValue.Type() {
-    case pdata.ValueTypeString:
+    case pcommon.ValueTypeString:
        // try to cast the string values to int64
        if val, err := strconv.Atoi(attributeValue.StringVal()); err == nil {
            return int64(val), nil
        }
-    case pdata.ValueTypeInt:
+    case pcommon.ValueTypeInt:
        return attributeValue.IntVal(), nil
    }
diff --git a/exporter/azuremonitorexporter/conventions_test.go b/exporter/azuremonitorexporter/conventions_test.go
index 61829e668641..15e7b3c0ebbc 100644
--- a/exporter/azuremonitorexporter/conventions_test.go
+++ b/exporter/azuremonitorexporter/conventions_test.go
@@ -18,8 +18,8 @@ import (
    "testing"
    "github.com/stretchr/testify/assert"
-    "go.opentelemetry.io/collector/model/pdata"
    conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1"
+    "go.opentelemetry.io/collector/pdata/pcommon"
)
func TestHTTPAttributeMapping(t *testing.T) {
@@ -45,7 +45,7 @@ func
TestHTTPAttributeMapping(t *testing.T) { conventions.AttributeHTTPClientIP: conventions.AttributeHTTPClientIP, } - attributeMap := pdata.NewMapFromRaw(httpAttributeValues) + attributeMap := pcommon.NewMapFromRaw(httpAttributeValues) // Add all the network attributes appendToAttributeMap(attributeMap, getNetworkAttributes()) @@ -80,7 +80,7 @@ func TestRPCPAttributeMapping(t *testing.T) { conventions.AttributeRPCMethod: conventions.AttributeRPCMethod, } - attributeMap := pdata.NewMapFromRaw(rpcAttributeValues) + attributeMap := pcommon.NewMapFromRaw(rpcAttributeValues) // Add all the network attributes appendToAttributeMap(attributeMap, getNetworkAttributes()) @@ -110,7 +110,7 @@ func TestDatabaseAttributeMapping(t *testing.T) { conventions.AttributeDBMongoDBCollection: conventions.AttributeDBMongoDBCollection, } - attributeMap := pdata.NewMapFromRaw(databaseAttributeValues) + attributeMap := pcommon.NewMapFromRaw(databaseAttributeValues) // Add all the network attributes appendToAttributeMap(attributeMap, getNetworkAttributes()) @@ -147,7 +147,7 @@ func TestMessagingAttributeMapping(t *testing.T) { conventions.AttributeMessagingOperation: conventions.AttributeMessagingOperation, } - attributeMap := pdata.NewMapFromRaw(messagingAttributeValues) + attributeMap := pcommon.NewMapFromRaw(messagingAttributeValues) // Add all the network attributes appendToAttributeMap(attributeMap, getNetworkAttributes()) @@ -177,7 +177,7 @@ func TestAttributeMappingWithSomeBadValues(t *testing.T) { conventions.AttributeNetPeerPort: "xx", } - attributeMap := pdata.NewMapFromRaw(values) + attributeMap := pcommon.NewMapFromRaw(values) attrs := &NetworkAttributes{} attributeMap.Range(attrs.MapAttribute) @@ -186,8 +186,8 @@ func TestAttributeMappingWithSomeBadValues(t *testing.T) { assert.Equal(t, int64(0), attrs.NetPeerPort) } -func getNetworkAttributes() pdata.Map { - return pdata.NewMapFromRaw(map[string]interface{}{ +func getNetworkAttributes() pcommon.Map { + return pcommon.NewMapFromRaw(map[string]interface{}{ conventions.AttributeNetTransport: conventions.AttributeNetTransport, conventions.AttributeNetPeerIP: conventions.AttributeNetPeerIP, conventions.AttributeNetPeerPort: 1, diff --git a/exporter/azuremonitorexporter/go.mod b/exporter/azuremonitorexporter/go.mod index a25addbc6a8f..8f17751a0f3b 100644 --- a/exporter/azuremonitorexporter/go.mod +++ b/exporter/azuremonitorexporter/go.mod @@ -5,21 +5,21 @@ go 1.17 require ( github.com/microsoft/ApplicationInsights-Go v0.4.4 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/zap v1.21.0 - golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d + golang.org/x/net v0.0.0-20220225172249-27dd8689420f ) require ( code.cloudfoundry.org/clock v1.0.0 // indirect - github.com/cenkalti/backoff/v4 v4.1.2 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/fsnotify/fsnotify v1.5.1 // indirect github.com/gofrs/uuid v4.0.0+incompatible // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/protobuf v1.5.2 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect 
github.com/kr/pretty v0.3.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect @@ -27,7 +27,6 @@ require ( github.com/pelletier/go-toml v1.9.4 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spf13/cast v1.4.1 // indirect github.com/stretchr/objx v0.2.0 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect @@ -35,12 +34,9 @@ require ( go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect - golang.org/x/text v0.3.7 // indirect - google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect - google.golang.org/grpc v1.45.0 // indirect - google.golang.org/protobuf v1.28.0 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/exporter/azuremonitorexporter/go.sum b/exporter/azuremonitorexporter/go.sum index c6b8f1b5c854..1dbbe52cb88f 100644 --- a/exporter/azuremonitorexporter/go.sum +++ b/exporter/azuremonitorexporter/go.sum @@ -1,11 +1,8 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= code.cloudfoundry.org/clock v0.0.0-20180518195852-02e53af36e6c/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8= code.cloudfoundry.org/clock v1.0.0 h1:kFXWQM4bxYvdBw2X8BbBeXwQNgfoWv1vqAk2ZZyBN2o= code.cloudfoundry.org/clock v1.0.0/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= @@ -21,19 +18,11 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= -github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash 
v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -41,9 +30,6 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= @@ -52,7 +38,6 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= @@ -72,18 +57,14 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/protobuf v1.2.0/go.mod 
h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -93,13 +74,10 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -130,8 +108,8 @@ github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqx github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= 
-github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= @@ -178,15 +156,11 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rhnvrm/simples3 v0.6.1/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -194,7 +168,6 @@ github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= @@ -205,20 +178,21 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod 
h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -243,20 +217,17 @@ golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= 
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -273,22 +244,18 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -309,22 +276,16 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -334,11 +295,7 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -349,8 +306,6 @@ 
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMy
gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
diff --git a/exporter/azuremonitorexporter/log_to_envelope.go b/exporter/azuremonitorexporter/log_to_envelope.go
index bda22b54266a..f1742bd9e15c 100644
--- a/exporter/azuremonitorexporter/log_to_envelope.go
+++ b/exporter/azuremonitorexporter/log_to_envelope.go
@@ -18,7 +18,7 @@ import (
    "time"
    "github.com/microsoft/ApplicationInsights-Go/appinsights/contracts"
-    "go.opentelemetry.io/collector/model/pdata"
+    "go.opentelemetry.io/collector/pdata/plog"
    "go.uber.org/zap"
)
@@ -40,7 +40,7 @@ type logPacker struct {
    logger *zap.Logger
}
-func (packer *logPacker) LogRecordToEnvelope(logRecord pdata.LogRecord) *contracts.Envelope {
+func (packer *logPacker) LogRecordToEnvelope(logRecord plog.LogRecord) *contracts.Envelope {
    envelope := contracts.NewEnvelope()
    envelope.Tags = make(map[string]string)
    envelope.Time = toTime(logRecord.Timestamp()).Format(time.RFC3339Nano)
diff --git a/exporter/azuremonitorexporter/logexporter.go b/exporter/azuremonitorexporter/logexporter.go
index 6ff46fd72813..65c699606a3c 100644
--- a/exporter/azuremonitorexporter/logexporter.go
+++ b/exporter/azuremonitorexporter/logexporter.go
@@ -19,7 +19,7 @@ import (
    "go.opentelemetry.io/collector/component"
    "go.opentelemetry.io/collector/exporter/exporterhelper"
-    "go.opentelemetry.io/collector/model/pdata"
+    "go.opentelemetry.io/collector/pdata/plog"
    "go.uber.org/zap"
)
@@ -29,7 +29,7 @@ type logExporter struct {
    logger *zap.Logger
}
-func (exporter *logExporter) onLogData(context context.Context, logData pdata.Logs) error {
+func (exporter *logExporter) onLogData(context context.Context, logData plog.Logs) error {
    resourceLogs := logData.ResourceLogs()
    logPacker := newLogPacker(exporter.logger)
diff --git a/exporter/azuremonitorexporter/logexporter_test.go b/exporter/azuremonitorexporter/logexporter_test.go
index 25bae9e21d05..deb9137035cd 100644
--- a/exporter/azuremonitorexporter/logexporter_test.go
+++ b/exporter/azuremonitorexporter/logexporter_test.go
@@ -26,8 +26,7 @@ import (
    "github.com/microsoft/ApplicationInsights-Go/appinsights/contracts"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
-    "go.opentelemetry.io/collector/model/otlp"
-    "go.opentelemetry.io/collector/model/pdata"
+    "go.opentelemetry.io/collector/pdata/plog"
    "go.uber.org/zap"
)
@@ -101,15 +100,15 @@ func getLogPacker() *logPacker {
    return newLogPacker(zap.NewNop())
}
-func getTestLogs(tb testing.TB) pdata.Logs {
-    logsMarshaler := otlp.NewJSONLogsUnmarshaler()
+func getTestLogs(tb testing.TB) plog.Logs {
+    logsMarshaler := plog.NewJSONUnmarshaler()
    logs, err := logsMarshaler.UnmarshalLogs(testLogs)
    assert.NoError(tb, err, "Can't unmarshal testing logs data -> %s", err)
    return logs
}
-func getTestLogRecord(tb testing.TB) pdata.LogRecord {
-    var logRecord pdata.LogRecord
+func getTestLogRecord(tb testing.TB) plog.LogRecord {
+    var logRecord plog.LogRecord
    logs := getTestLogs(tb)
    resourceLogs := logs.ResourceLogs()
    scopeLogs := resourceLogs.At(0).ScopeLogs()
diff --git a/exporter/azuremonitorexporter/time_utils.go b/exporter/azuremonitorexporter/time_utils.go
index 97fb43f6f825..d2044af6bbb1 100644
--- a/exporter/azuremonitorexporter/time_utils.go
+++ b/exporter/azuremonitorexporter/time_utils.go
@@ -19,10 +19,10 @@ import (
    "fmt"
    "time"
-    "go.opentelemetry.io/collector/model/pdata"
+    "go.opentelemetry.io/collector/pdata/pcommon"
)
-func toTime(t pdata.Timestamp) time.Time {
+func toTime(t pcommon.Timestamp) time.Time {
    return time.Unix(0, int64(t))
}
diff --git a/exporter/azuremonitorexporter/time_utils_test.go b/exporter/azuremonitorexporter/time_utils_test.go
index 57422f830d2f..c490647431b8 100644
--- a/exporter/azuremonitorexporter/time_utils_test.go
+++ b/exporter/azuremonitorexporter/time_utils_test.go
@@ -19,12 +19,12 @@ import (
    "time"
    "github.com/stretchr/testify/assert"
-    "go.opentelemetry.io/collector/model/pdata"
+    "go.opentelemetry.io/collector/pdata/pcommon"
)
func TestToTime(t *testing.T) {
    // 61 seconds after the Unix epoch of 1970-01-01T00:00:00Z
-    input := pdata.Timestamp(60000000001)
+    input := pcommon.Timestamp(60000000001)
    output := toTime(input)
    assert.NotNil(t, output)
diff --git a/exporter/azuremonitorexporter/trace_to_envelope.go b/exporter/azuremonitorexporter/trace_to_envelope.go
index 818ed5bc8e17..5a26091980a6 100644
--- a/exporter/azuremonitorexporter/trace_to_envelope.go
+++ b/exporter/azuremonitorexporter/trace_to_envelope.go
@@ -23,8 +23,9 @@ import (
    "time"
    "github.com/microsoft/ApplicationInsights-Go/appinsights/contracts"
-    "go.opentelemetry.io/collector/model/pdata"
    conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1"
+    "go.opentelemetry.io/collector/pdata/pcommon"
+    "go.opentelemetry.io/collector/pdata/ptrace"
    "go.uber.org/zap"
)
@@ -48,20 +49,20 @@ var (
// Used to identify the type of a received Span
type spanType int8
-// Transforms a tuple of pdata.Resource, pdata.InstrumentationScope, pdata.Span into an AppInsights contracts.Envelope
+// Transforms a tuple of pcommon.Resource, pcommon.InstrumentationScope, ptrace.Span into an AppInsights contracts.Envelope
// This is the only method that should be targeted in the unit tests
func spanToEnvelope(
-    resource pdata.Resource,
-    instrumentationScope pdata.InstrumentationScope,
-    span pdata.Span,
+    resource pcommon.Resource,
+    instrumentationScope pcommon.InstrumentationScope,
+    span ptrace.Span,
    logger *zap.Logger) (*contracts.Envelope, error) {
    spanKind := span.Kind()
    // According to the SpanKind documentation, we can assume it to be INTERNAL
    // when we get UNSPECIFIED.
- if spanKind == pdata.SpanKindUnspecified { - spanKind = pdata.SpanKindInternal + if spanKind == ptrace.SpanKindUnspecified { + spanKind = ptrace.SpanKindInternal } attributeMap := span.Attributes() @@ -82,7 +83,7 @@ func spanToEnvelope( var dataSanitizeFunc func() []string var dataProperties map[string]string - if spanKind == pdata.SpanKindServer || spanKind == pdata.SpanKindConsumer { + if spanKind == ptrace.SpanKindServer || spanKind == ptrace.SpanKindConsumer { requestData := spanToRequestData(span, incomingSpanType) dataProperties = requestData.Properties dataSanitizeFunc = requestData.Sanitize @@ -90,11 +91,11 @@ func spanToEnvelope( envelope.Tags[contracts.OperationName] = requestData.Name data.BaseData = requestData data.BaseType = requestData.BaseType() - } else if spanKind == pdata.SpanKindClient || spanKind == pdata.SpanKindProducer || spanKind == pdata.SpanKindInternal { + } else if spanKind == ptrace.SpanKindClient || spanKind == ptrace.SpanKindProducer || spanKind == ptrace.SpanKindInternal { remoteDependencyData := spanToRemoteDependencyData(span, incomingSpanType) // Regardless of the detected Span type, if the SpanKind is Internal we need to set data.Type to InProc - if spanKind == pdata.SpanKindInternal { + if spanKind == ptrace.SpanKindInternal { remoteDependencyData.Type = "InProc" } @@ -116,7 +117,7 @@ func spanToEnvelope( resourceAttributes := resource.Attributes() // Copy all the resource labels into the base data properties. Resource values are always strings - resourceAttributes.Range(func(k string, v pdata.Value) bool { + resourceAttributes.Range(func(k string, v pcommon.Value) bool { dataProperties[k] = v.StringVal() return true }) @@ -155,7 +156,7 @@ func spanToEnvelope( } // Maps Server/Consumer Span to AppInsights RequestData -func spanToRequestData(span pdata.Span, incomingSpanType spanType) *contracts.RequestData { +func spanToRequestData(span ptrace.Span, incomingSpanType spanType) *contracts.RequestData { // See https://github.com/microsoft/ApplicationInsights-Go/blob/master/appinsights/contracts/requestdata.go // Start with some reasonable default for server spans. data := contracts.NewRequestData() @@ -181,7 +182,7 @@ func spanToRequestData(span pdata.Span, incomingSpanType spanType) *contracts.Re } // Maps Span to AppInsights RemoteDependencyData -func spanToRemoteDependencyData(span pdata.Span, incomingSpanType spanType) *contracts.RemoteDependencyData { +func spanToRemoteDependencyData(span ptrace.Span, incomingSpanType spanType) *contracts.RemoteDependencyData { // https://github.com/microsoft/ApplicationInsights-Go/blob/master/appinsights/contracts/remotedependencydata.go // Start with some reasonable default for dependent spans. 
data := contracts.NewRemoteDependencyData() @@ -215,7 +216,7 @@ func getFormattedHTTPStatusValues(statusCode int64) (statusAsString string, succ // Maps HTTP Server Span to AppInsights RequestData // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/http.md#semantic-conventions-for-http-spans -func fillRequestDataHTTP(span pdata.Span, data *contracts.RequestData) { +func fillRequestDataHTTP(span ptrace.Span, data *contracts.RequestData) { attrs := copyAndExtractHTTPAttributes(span.Attributes(), data.Properties, data.Measurements) if attrs.HTTPStatusCode != 0 { @@ -301,7 +302,7 @@ func fillRequestDataHTTP(span pdata.Span, data *contracts.RequestData) { // Maps HTTP Client Span to AppInsights RemoteDependencyData // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/http.md -func fillRemoteDependencyDataHTTP(span pdata.Span, data *contracts.RemoteDependencyData) { +func fillRemoteDependencyDataHTTP(span ptrace.Span, data *contracts.RemoteDependencyData) { attrs := copyAndExtractHTTPAttributes(span.Attributes(), data.Properties, data.Measurements) data.Type = "HTTP" @@ -388,7 +389,7 @@ func fillRemoteDependencyDataHTTP(span pdata.Span, data *contracts.RemoteDepende // Maps RPC Server Span to AppInsights RequestData // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/rpc.md -func fillRequestDataRPC(span pdata.Span, data *contracts.RequestData) { +func fillRequestDataRPC(span ptrace.Span, data *contracts.RequestData) { attrs := copyAndExtractRPCAttributes(span.Attributes(), data.Properties, data.Measurements) data.ResponseCode = getRPCStatusCodeAsString(attrs) @@ -414,7 +415,7 @@ func fillRequestDataRPC(span pdata.Span, data *contracts.RequestData) { // Maps RPC Client Span to AppInsights RemoteDependencyData // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/rpc.md -func fillRemoteDependencyDataRPC(span pdata.Span, data *contracts.RemoteDependencyData) { +func fillRemoteDependencyDataRPC(span ptrace.Span, data *contracts.RemoteDependencyData) { attrs := copyAndExtractRPCAttributes(span.Attributes(), data.Properties, data.Measurements) data.ResultCode = getRPCStatusCodeAsString(attrs) @@ -440,7 +441,7 @@ func getRPCStatusCodeAsString(rpcAttributes *RPCAttributes) (statusCodeAsString // Maps Database Client Span to AppInsights RemoteDependencyData // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/database.md -func fillRemoteDependencyDataDatabase(span pdata.Span, data *contracts.RemoteDependencyData) { +func fillRemoteDependencyDataDatabase(span ptrace.Span, data *contracts.RemoteDependencyData) { attrs := copyAndExtractDatabaseAttributes(span.Attributes(), data.Properties, data.Measurements) data.Type = attrs.DBSystem @@ -458,7 +459,7 @@ func fillRemoteDependencyDataDatabase(span pdata.Span, data *contracts.RemoteDep // Maps Messaging Consumer/Server Span to AppInsights RequestData // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/messaging.md -func fillRequestDataMessaging(span pdata.Span, data *contracts.RequestData) { +func fillRequestDataMessaging(span ptrace.Span, data *contracts.RequestData) { attrs := copyAndExtractMessagingAttributes(span.Attributes(), 
data.Properties, data.Measurements) // TODO Understand how to map attributes to RequestData fields @@ -473,7 +474,7 @@ func fillRequestDataMessaging(span pdata.Span, data *contracts.RequestData) { // Maps Messaging Producer/Client Span to AppInsights RemoteDependencyData // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/messaging.md -func fillRemoteDependencyDataMessaging(span pdata.Span, data *contracts.RemoteDependencyData) { +func fillRemoteDependencyDataMessaging(span ptrace.Span, data *contracts.RemoteDependencyData) { attrs := copyAndExtractMessagingAttributes(span.Attributes(), data.Properties, data.Measurements) // TODO Understand how to map attributes to RemoteDependencyData fields @@ -491,12 +492,12 @@ func fillRemoteDependencyDataMessaging(span pdata.Span, data *contracts.RemoteDe // Copies all attributes to either properties or measurements and passes the key/value to another mapping function func copyAndMapAttributes( - attributeMap pdata.Map, + attributeMap pcommon.Map, properties map[string]string, measurements map[string]float64, - mappingFunc func(k string, v pdata.Value)) { + mappingFunc func(k string, v pcommon.Value)) { - attributeMap.Range(func(k string, v pdata.Value) bool { + attributeMap.Range(func(k string, v pcommon.Value) bool { setAttributeValueAsPropertyOrMeasurement(k, v, properties, measurements) if mappingFunc != nil { mappingFunc(k, v) @@ -507,7 +508,7 @@ func copyAndMapAttributes( // Copies all attributes to either properties or measurements without any kind of mapping to a known set of attributes func copyAttributesWithoutMapping( - attributeMap pdata.Map, + attributeMap pcommon.Map, properties map[string]string, measurements map[string]float64) { @@ -516,7 +517,7 @@ func copyAttributesWithoutMapping( // Attribute extraction logic for HTTP Span attributes func copyAndExtractHTTPAttributes( - attributeMap pdata.Map, + attributeMap pcommon.Map, properties map[string]string, measurements map[string]float64) *HTTPAttributes { @@ -525,14 +526,14 @@ func copyAndExtractHTTPAttributes( attributeMap, properties, measurements, - func(k string, v pdata.Value) { attrs.MapAttribute(k, v) }) + func(k string, v pcommon.Value) { attrs.MapAttribute(k, v) }) return attrs } // Attribute extraction logic for RPC Span attributes func copyAndExtractRPCAttributes( - attributeMap pdata.Map, + attributeMap pcommon.Map, properties map[string]string, measurements map[string]float64) *RPCAttributes { @@ -541,14 +542,14 @@ func copyAndExtractRPCAttributes( attributeMap, properties, measurements, - func(k string, v pdata.Value) { attrs.MapAttribute(k, v) }) + func(k string, v pcommon.Value) { attrs.MapAttribute(k, v) }) return attrs } // Attribute extraction logic for Database Span attributes func copyAndExtractDatabaseAttributes( - attributeMap pdata.Map, + attributeMap pcommon.Map, properties map[string]string, measurements map[string]float64) *DatabaseAttributes { @@ -557,14 +558,14 @@ func copyAndExtractDatabaseAttributes( attributeMap, properties, measurements, - func(k string, v pdata.Value) { attrs.MapAttribute(k, v) }) + func(k string, v pcommon.Value) { attrs.MapAttribute(k, v) }) return attrs } // Attribute extraction logic for Messaging Span attributes func copyAndExtractMessagingAttributes( - attributeMap pdata.Map, + attributeMap pcommon.Map, properties map[string]string, measurements map[string]float64) *MessagingAttributes { @@ -573,19 +574,19 @@ func copyAndExtractMessagingAttributes( 
attributeMap, properties, measurements, - func(k string, v pdata.Value) { attrs.MapAttribute(k, v) }) + func(k string, v pcommon.Value) { attrs.MapAttribute(k, v) }) return attrs } -func formatSpanDuration(span pdata.Span) string { +func formatSpanDuration(span ptrace.Span) string { startTime := toTime(span.StartTimestamp()) endTime := toTime(span.EndTimestamp()) return formatDuration(endTime.Sub(startTime)) } // Maps incoming Span to a type defined in the specification -func mapIncomingSpanToType(attributeMap pdata.Map) spanType { +func mapIncomingSpanToType(attributeMap pcommon.Map) spanType { // No attributes if attributeMap.Len() == 0 { return unknownSpanType @@ -619,10 +620,10 @@ func mapIncomingSpanToType(attributeMap pdata.Map) spanType { } // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status -func getDefaultFormattedSpanStatus(spanStatus pdata.SpanStatus) (statusCodeAsString string, success bool) { +func getDefaultFormattedSpanStatus(spanStatus ptrace.SpanStatus) (statusCodeAsString string, success bool) { code := spanStatus.Code() - return strconv.FormatInt(int64(code), 10), code != pdata.StatusCodeError + return strconv.FormatInt(int64(code), 10), code != ptrace.StatusCodeError } func writeFormattedPeerAddressFromNetworkAttributes(networkAttributes *NetworkAttributes, sb *strings.Builder) { @@ -641,21 +642,21 @@ func writeFormattedPeerAddressFromNetworkAttributes(networkAttributes *NetworkAt func setAttributeValueAsPropertyOrMeasurement( key string, - attributeValue pdata.Value, + attributeValue pcommon.Value, properties map[string]string, measurements map[string]float64) { switch attributeValue.Type() { - case pdata.ValueTypeBool: + case pcommon.ValueTypeBool: properties[key] = strconv.FormatBool(attributeValue.BoolVal()) - case pdata.ValueTypeString: + case pcommon.ValueTypeString: properties[key] = attributeValue.StringVal() - case pdata.ValueTypeInt: + case pcommon.ValueTypeInt: measurements[key] = float64(attributeValue.IntVal()) - case pdata.ValueTypeDouble: + case pcommon.ValueTypeDouble: measurements[key] = attributeValue.DoubleVal() } } diff --git a/exporter/azuremonitorexporter/trace_to_envelope_test.go b/exporter/azuremonitorexporter/trace_to_envelope_test.go index 28332841234a..708b13eb30f1 100644 --- a/exporter/azuremonitorexporter/trace_to_envelope_test.go +++ b/exporter/azuremonitorexporter/trace_to_envelope_test.go @@ -22,8 +22,9 @@ import ( "github.com/microsoft/ApplicationInsights-Go/appinsights/contracts" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap" ) @@ -61,8 +62,8 @@ var ( defaultSpanIDAsHex = fmt.Sprintf("%02x", defaultSpanID) defaultParentSpanID = [8]byte{35, 191, 77, 229, 162, 242, 217, 77} defaultParentSpanIDAsHex = fmt.Sprintf("%02x", defaultParentSpanID) - defaultSpanStartTime = pdata.Timestamp(0) - defaultSpanEndTme = pdata.Timestamp(60000000000) + defaultSpanStartTime = pcommon.Timestamp(0) + defaultSpanEndTme = pcommon.Timestamp(60000000000) defaultSpanDuration = formatDuration(toTime(defaultSpanEndTme).Sub(toTime(defaultSpanStartTime))) defaultHTTPStatusCodeAsString = strconv.FormatInt(defaultHTTPStatusCode, 10) defaultRPCStatusCodeAsString = strconv.FormatInt(defaultRPCStatusCode, 10) @@ -114,11 +115,11 @@ var ( // - adds a few different types of 
attributes func TestHTTPServerSpanToRequestDataAttributeSet1(t *testing.T) { span := getDefaultHTTPServerSpan() - span.Status().SetCode(pdata.StatusCodeError) + span.Status().SetCode(ptrace.StatusCodeError) span.Status().SetMessage("Fubar") spanAttributes := span.Attributes() - appendToAttributeMap(spanAttributes, pdata.NewMapFromRaw(map[string]interface{}{ + appendToAttributeMap(spanAttributes, pcommon.NewMapFromRaw(map[string]interface{}{ // http.scheme, http.host, http.target => data.Url conventions.AttributeHTTPScheme: "https", conventions.AttributeHTTPHost: "foo", @@ -160,7 +161,7 @@ func TestHTTPServerSpanToRequestDataAttributeSet2(t *testing.T) { appendToAttributeMap( spanAttributes, - pdata.NewMapFromRaw(map[string]interface{}{ + pcommon.NewMapFromRaw(map[string]interface{}{ conventions.AttributeHTTPStatusCode: defaultHTTPStatusCode, conventions.AttributeHTTPScheme: "https", conventions.AttributeHTTPServerName: "foo", @@ -189,7 +190,7 @@ func TestHTTPServerSpanToRequestDataAttributeSet3(t *testing.T) { appendToAttributeMap( spanAttributes, - pdata.NewMapFromRaw(map[string]interface{}{ + pcommon.NewMapFromRaw(map[string]interface{}{ conventions.AttributeHTTPStatusCode: defaultHTTPStatusCode, conventions.AttributeHTTPScheme: "https", conventions.AttributeNetHostName: "foo", @@ -216,7 +217,7 @@ func TestHTTPServerSpanToRequestDataAttributeSet4(t *testing.T) { appendToAttributeMap( spanAttributes, - pdata.NewMapFromRaw(map[string]interface{}{ + pcommon.NewMapFromRaw(map[string]interface{}{ conventions.AttributeHTTPStatusCode: defaultHTTPStatusCode, conventions.AttributeHTTPURL: "https://foo:81/bar?biz=baz", })) @@ -247,7 +248,7 @@ func TestHTTPClientSpanToRemoteDependencyAttributeSet1(t *testing.T) { appendToAttributeMap( spanAttributes, - pdata.NewMapFromRaw(map[string]interface{}{ + pcommon.NewMapFromRaw(map[string]interface{}{ conventions.AttributeHTTPURL: "https://foo:81/bar?biz=baz", conventions.AttributeHTTPStatusCode: 400, @@ -275,7 +276,7 @@ func TestHTTPClientSpanToRemoteDependencyAttributeSet2(t *testing.T) { appendToAttributeMap( spanAttributes, - pdata.NewMapFromRaw(map[string]interface{}{ + pcommon.NewMapFromRaw(map[string]interface{}{ // http.scheme, http.host, http.target => data.Url conventions.AttributeHTTPStatusCode: defaultHTTPStatusCode, conventions.AttributeHTTPScheme: "https", @@ -306,7 +307,7 @@ func TestHTTPClientSpanToRemoteDependencyAttributeSet3(t *testing.T) { appendToAttributeMap( spanAttributes, - pdata.NewMapFromRaw(map[string]interface{}{ + pcommon.NewMapFromRaw(map[string]interface{}{ conventions.AttributeHTTPStatusCode: defaultHTTPStatusCode, conventions.AttributeHTTPScheme: "https", conventions.AttributeNetPeerName: "foo", @@ -329,7 +330,7 @@ func TestHTTPClientSpanToRemoteDependencyAttributeSet4(t *testing.T) { appendToAttributeMap( spanAttributes, - pdata.NewMapFromRaw(map[string]interface{}{ + pcommon.NewMapFromRaw(map[string]interface{}{ conventions.AttributeHTTPStatusCode: defaultHTTPStatusCode, conventions.AttributeHTTPScheme: "https", conventions.AttributeNetPeerIP: "127.0.0.1", @@ -351,7 +352,7 @@ func TestRPCServerSpanToRequestData(t *testing.T) { appendToAttributeMap( spanAttributes, - pdata.NewMapFromRaw(map[string]interface{}{ + pcommon.NewMapFromRaw(map[string]interface{}{ conventions.AttributeNetPeerName: "foo", conventions.AttributeNetPeerIP: "127.0.0.1", conventions.AttributeNetPeerPort: 81, @@ -365,7 +366,7 @@ func TestRPCServerSpanToRequestData(t *testing.T) { // test fallback to peerip appendToAttributeMap( spanAttributes, - 
pdata.NewMapFromRaw(map[string]interface{}{ + pcommon.NewMapFromRaw(map[string]interface{}{ conventions.AttributeNetPeerName: "", conventions.AttributeNetPeerIP: "127.0.0.1", })) @@ -382,7 +383,7 @@ func TestRPCClientSpanToRemoteDependencyData(t *testing.T) { appendToAttributeMap( spanAttributes, - pdata.NewMapFromRaw(map[string]interface{}{ + pcommon.NewMapFromRaw(map[string]interface{}{ conventions.AttributeNetPeerName: "foo", conventions.AttributeNetPeerPort: 81, conventions.AttributeNetPeerIP: "127.0.0.1", @@ -396,7 +397,7 @@ func TestRPCClientSpanToRemoteDependencyData(t *testing.T) { // test fallback to peerip appendToAttributeMap( spanAttributes, - pdata.NewMapFromRaw(map[string]interface{}{ + pcommon.NewMapFromRaw(map[string]interface{}{ conventions.AttributeNetPeerName: "", conventions.AttributeNetPeerIP: "127.0.0.1", })) @@ -406,7 +407,7 @@ func TestRPCClientSpanToRemoteDependencyData(t *testing.T) { defaultRPCRemoteDependencyDataValidations(t, span, data, "127.0.0.1:81") // test RPC error using the new rpc.grpc.status_code attribute - span.Status().SetCode(pdata.StatusCodeError) + span.Status().SetCode(ptrace.StatusCodeError) span.Status().SetMessage("Resource exhausted") spanAttributes.InsertInt(attributeRPCGRPCStatusCode, 8) @@ -425,7 +426,7 @@ func TestDatabaseClientSpanToRemoteDependencyData(t *testing.T) { appendToAttributeMap( spanAttributes, - pdata.NewMapFromRaw(map[string]interface{}{ + pcommon.NewMapFromRaw(map[string]interface{}{ conventions.AttributeDBStatement: defaultDBStatement, conventions.AttributeNetPeerName: "foo", conventions.AttributeNetPeerPort: 81, @@ -442,7 +443,7 @@ func TestDatabaseClientSpanToRemoteDependencyData(t *testing.T) { // Test the fallback to data.Data fallback to DBOperation appendToAttributeMap( spanAttributes, - pdata.NewMapFromRaw(map[string]interface{}{ + pcommon.NewMapFromRaw(map[string]interface{}{ conventions.AttributeDBStatement: "", conventions.AttributeDBOperation: defaultDBOperation, })) @@ -459,7 +460,7 @@ func TestMessagingConsumerSpanToRequestData(t *testing.T) { appendToAttributeMap( spanAttributes, - pdata.NewMapFromRaw(map[string]interface{}{ + pcommon.NewMapFromRaw(map[string]interface{}{ conventions.AttributeMessagingURL: defaultMessagingURL, conventions.AttributeNetPeerName: "foo", conventions.AttributeNetPeerPort: 81, @@ -475,7 +476,7 @@ func TestMessagingConsumerSpanToRequestData(t *testing.T) { // test fallback from MessagingURL to net.* properties appendToAttributeMap( spanAttributes, - pdata.NewMapFromRaw(map[string]interface{}{ + pcommon.NewMapFromRaw(map[string]interface{}{ conventions.AttributeMessagingURL: "", })) @@ -492,7 +493,7 @@ func TestMessagingProducerSpanToRequestData(t *testing.T) { appendToAttributeMap( spanAttributes, - pdata.NewMapFromRaw(map[string]interface{}{ + pcommon.NewMapFromRaw(map[string]interface{}{ conventions.AttributeMessagingURL: defaultMessagingURL, conventions.AttributeNetPeerName: "foo", conventions.AttributeNetPeerPort: 81, @@ -508,7 +509,7 @@ func TestMessagingProducerSpanToRequestData(t *testing.T) { // test fallback from MessagingURL to net.* properties appendToAttributeMap( spanAttributes, - pdata.NewMapFromRaw(map[string]interface{}{ + pcommon.NewMapFromRaw(map[string]interface{}{ conventions.AttributeMessagingURL: "", })) @@ -525,7 +526,7 @@ func TestUnknownInternalSpanToRemoteDependencyData(t *testing.T) { appendToAttributeMap( spanAttributes, - pdata.NewMapFromRaw(map[string]interface{}{ + pcommon.NewMapFromRaw(map[string]interface{}{ "foo": "bar", })) @@ -538,7 +539,7 @@ 
func TestUnknownInternalSpanToRemoteDependencyData(t *testing.T) { // Tests that spans with unspecified kind are treated similar to internal spans func TestUnspecifiedSpanToInProcRemoteDependencyData(t *testing.T) { span := getDefaultInternalSpan() - span.SetKind(pdata.SpanKindUnspecified) + span.SetKind(ptrace.SpanKindUnspecified) envelope, _ := spanToEnvelope(defaultResource, defaultInstrumentationLibrary, span, zap.NewNop()) commonEnvelopeValidations(t, span, envelope, defaultRemoteDependencyDataEnvelopeName) @@ -572,7 +573,7 @@ func TestSanitize(t *testing.T) { */ func commonEnvelopeValidations( t *testing.T, - span pdata.Span, + span ptrace.Span, envelope *contracts.Envelope, expectedEnvelopeName string) { @@ -594,7 +595,7 @@ func commonEnvelopeValidations( // Validate common stuff across any Span -> RequestData translation func commonRequestDataValidations( t *testing.T, - span pdata.Span, + span ptrace.Span, data *contracts.RequestData) { assertAttributesCopiedToPropertiesOrMeasurements(t, span.Attributes(), data.Properties, data.Measurements) @@ -608,7 +609,7 @@ func commonRequestDataValidations( // Validate common RequestData values for HTTP Spans created using the default test values func defaultHTTPRequestDataValidations( t *testing.T, - span pdata.Span, + span ptrace.Span, data *contracts.RequestData) { commonRequestDataValidations(t, span, data) @@ -621,7 +622,7 @@ func defaultHTTPRequestDataValidations( // Validate common stuff across any Span -> RemoteDependencyData translation func commonRemoteDependencyDataValidations( t *testing.T, - span pdata.Span, + span ptrace.Span, data *contracts.RemoteDependencyData) { assertAttributesCopiedToPropertiesOrMeasurements(t, span.Attributes(), data.Properties, data.Measurements) @@ -632,7 +633,7 @@ func commonRemoteDependencyDataValidations( // Validate common RemoteDependencyData values for HTTP Spans created using the default test values func defaultHTTPRemoteDependencyDataValidations( t *testing.T, - span pdata.Span, + span ptrace.Span, data *contracts.RemoteDependencyData) { commonRemoteDependencyDataValidations(t, span, data) @@ -645,7 +646,7 @@ func defaultHTTPRemoteDependencyDataValidations( func defaultRPCRequestDataValidations( t *testing.T, - span pdata.Span, + span ptrace.Span, data *contracts.RequestData, expectedDataSource string) { @@ -660,7 +661,7 @@ func defaultRPCRequestDataValidations( func defaultRPCRemoteDependencyDataValidations( t *testing.T, - span pdata.Span, + span ptrace.Span, data *contracts.RemoteDependencyData, expectedDataTarget string) { @@ -678,7 +679,7 @@ func defaultRPCRemoteDependencyDataValidations( func defaultDatabaseRemoteDependencyDataValidations( t *testing.T, - span pdata.Span, + span ptrace.Span, data *contracts.RemoteDependencyData) { commonRemoteDependencyDataValidations(t, span, data) @@ -691,7 +692,7 @@ func defaultDatabaseRemoteDependencyDataValidations( func defaultMessagingRequestDataValidations( t *testing.T, - span pdata.Span, + span ptrace.Span, data *contracts.RequestData) { commonRequestDataValidations(t, span, data) @@ -703,7 +704,7 @@ func defaultMessagingRequestDataValidations( func defaultMessagingRemoteDependencyDataValidations( t *testing.T, - span pdata.Span, + span ptrace.Span, data *contracts.RemoteDependencyData) { commonRemoteDependencyDataValidations(t, span, data) @@ -716,7 +717,7 @@ func defaultMessagingRemoteDependencyDataValidations( func defaultInternalRemoteDependencyDataValidations( t *testing.T, - span pdata.Span, + span ptrace.Span, data 
*contracts.RemoteDependencyData) { assertAttributesCopiedToPropertiesOrMeasurements(t, span.Attributes(), data.Properties, data.Measurements) @@ -726,25 +727,25 @@ func defaultInternalRemoteDependencyDataValidations( // Verifies that all attributes are copies to either the properties or measurements maps of the envelope's data element func assertAttributesCopiedToPropertiesOrMeasurements( t *testing.T, - attributeMap pdata.Map, + attributeMap pcommon.Map, properties map[string]string, measurements map[string]float64) { - attributeMap.Range(func(k string, v pdata.Value) bool { + attributeMap.Range(func(k string, v pcommon.Value) bool { switch v.Type() { - case pdata.ValueTypeString: + case pcommon.ValueTypeString: p, exists := properties[k] assert.True(t, exists) assert.Equal(t, v.StringVal(), p) - case pdata.ValueTypeBool: + case pcommon.ValueTypeBool: p, exists := properties[k] assert.True(t, exists) assert.Equal(t, strconv.FormatBool(v.BoolVal()), p) - case pdata.ValueTypeInt: + case pcommon.ValueTypeInt: m, exists := measurements[k] assert.True(t, exists) assert.Equal(t, float64(v.IntVal()), m) - case pdata.ValueTypeDouble: + case pcommon.ValueTypeDouble: m, exists := measurements[k] assert.True(t, exists) assert.Equal(t, v.DoubleVal(), m) @@ -756,95 +757,95 @@ func assertAttributesCopiedToPropertiesOrMeasurements( /* The remainder of these methods are for building up test assets */ -func getSpan(spanName string, spanKind pdata.SpanKind, initialAttributes map[string]interface{}) pdata.Span { - span := pdata.NewSpan() - span.SetTraceID(pdata.NewTraceID(defaultTraceID)) - span.SetSpanID(pdata.NewSpanID(defaultSpanID)) - span.SetParentSpanID(pdata.NewSpanID(defaultParentSpanID)) +func getSpan(spanName string, spanKind ptrace.SpanKind, initialAttributes map[string]interface{}) ptrace.Span { + span := ptrace.NewSpan() + span.SetTraceID(pcommon.NewTraceID(defaultTraceID)) + span.SetSpanID(pcommon.NewSpanID(defaultSpanID)) + span.SetParentSpanID(pcommon.NewSpanID(defaultParentSpanID)) span.SetName(spanName) span.SetKind(spanKind) span.SetStartTimestamp(defaultSpanStartTime) span.SetEndTimestamp(defaultSpanEndTme) - pdata.NewMapFromRaw(initialAttributes).CopyTo(span.Attributes()) + pcommon.NewMapFromRaw(initialAttributes).CopyTo(span.Attributes()) return span } // Returns a default server span -func getServerSpan(spanName string, initialAttributes map[string]interface{}) pdata.Span { - return getSpan(spanName, pdata.SpanKindServer, initialAttributes) +func getServerSpan(spanName string, initialAttributes map[string]interface{}) ptrace.Span { + return getSpan(spanName, ptrace.SpanKindServer, initialAttributes) } // Returns a default client span -func getClientSpan(spanName string, initialAttributes map[string]interface{}) pdata.Span { - return getSpan(spanName, pdata.SpanKindClient, initialAttributes) +func getClientSpan(spanName string, initialAttributes map[string]interface{}) ptrace.Span { + return getSpan(spanName, ptrace.SpanKindClient, initialAttributes) } // Returns a default consumer span -func getConsumerSpan(spanName string, initialAttributes map[string]interface{}) pdata.Span { - return getSpan(spanName, pdata.SpanKindConsumer, initialAttributes) +func getConsumerSpan(spanName string, initialAttributes map[string]interface{}) ptrace.Span { + return getSpan(spanName, ptrace.SpanKindConsumer, initialAttributes) } // Returns a default producer span -func getProducerSpan(spanName string, initialAttributes map[string]interface{}) pdata.Span { - return getSpan(spanName, 
pdata.SpanKindProducer, initialAttributes) +func getProducerSpan(spanName string, initialAttributes map[string]interface{}) ptrace.Span { + return getSpan(spanName, ptrace.SpanKindProducer, initialAttributes) } // Returns a default internal span -func getInternalSpan(spanName string, initialAttributes map[string]interface{}) pdata.Span { - return getSpan(spanName, pdata.SpanKindInternal, initialAttributes) +func getInternalSpan(spanName string, initialAttributes map[string]interface{}) ptrace.Span { + return getSpan(spanName, ptrace.SpanKindInternal, initialAttributes) } -func getDefaultHTTPServerSpan() pdata.Span { +func getDefaultHTTPServerSpan() ptrace.Span { return getServerSpan( defaultHTTPServerSpanName, requiredHTTPAttributes) } -func getDefaultHTTPClientSpan() pdata.Span { +func getDefaultHTTPClientSpan() ptrace.Span { return getClientSpan( defaultHTTPClientSpanName, requiredHTTPAttributes) } -func getDefaultRPCServerSpan() pdata.Span { +func getDefaultRPCServerSpan() ptrace.Span { return getServerSpan( defaultRPCSpanName, requiredRPCAttributes) } -func getDefaultRPCClientSpan() pdata.Span { +func getDefaultRPCClientSpan() ptrace.Span { return getClientSpan( defaultRPCSpanName, requiredRPCAttributes) } -func getDefaultDatabaseClientSpan() pdata.Span { +func getDefaultDatabaseClientSpan() ptrace.Span { return getClientSpan( defaultDBSpanName, requiredDatabaseAttributes) } -func getDefaultMessagingConsumerSpan() pdata.Span { +func getDefaultMessagingConsumerSpan() ptrace.Span { return getConsumerSpan( defaultMessagingSpanName, requiredMessagingAttributes) } -func getDefaultMessagingProducerSpan() pdata.Span { +func getDefaultMessagingProducerSpan() ptrace.Span { return getProducerSpan( defaultMessagingSpanName, requiredMessagingAttributes) } -func getDefaultInternalSpan() pdata.Span { +func getDefaultInternalSpan() ptrace.Span { return getInternalSpan( defaultInternalSpanName, map[string]interface{}{}) } // Returns a default Resource -func getResource() pdata.Resource { - r := pdata.NewResource() +func getResource() pcommon.Resource { + r := pcommon.NewResource() r.Attributes().InsertString(conventions.AttributeServiceName, defaultServiceName) r.Attributes().InsertString(conventions.AttributeServiceNamespace, defaultServiceNamespace) r.Attributes().InsertString(conventions.AttributeServiceInstanceID, defaultServiceInstance) @@ -852,17 +853,17 @@ func getResource() pdata.Resource { } // Returns a default instrumentation library -func getInstrumentationLibrary() pdata.InstrumentationScope { - il := pdata.NewInstrumentationScope() +func getInstrumentationLibrary() pcommon.InstrumentationScope { + il := pcommon.NewInstrumentationScope() il.SetName(defaultInstrumentationLibraryName) il.SetVersion(defaultInstrumentationLibraryVersion) return il } // Adds a map of AttributeValues to an existing AttributeMap -func appendToAttributeMap(attributeMap pdata.Map, maps ...pdata.Map) { +func appendToAttributeMap(attributeMap pcommon.Map, maps ...pcommon.Map) { for _, m := range maps { - m.Range(func(k string, v pdata.Value) bool { + m.Range(func(k string, v pcommon.Value) bool { attributeMap.Upsert(k, v) return true }) diff --git a/exporter/azuremonitorexporter/traceexporter.go b/exporter/azuremonitorexporter/traceexporter.go index ba643218e064..9b0378a540fb 100644 --- a/exporter/azuremonitorexporter/traceexporter.go +++ b/exporter/azuremonitorexporter/traceexporter.go @@ -20,7 +20,8 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer/consumererror" 
"go.opentelemetry.io/collector/exporter/exporterhelper" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap" ) @@ -38,8 +39,8 @@ type traceVisitor struct { // Called for each tuple of Resource, InstrumentationLibrary, and Span func (v *traceVisitor) visit( - resource pdata.Resource, - instrumentationLibrary pdata.InstrumentationScope, span pdata.Span) (ok bool) { + resource pcommon.Resource, + instrumentationLibrary pcommon.InstrumentationScope, span ptrace.Span) (ok bool) { envelope, err := spanToEnvelope(resource, instrumentationLibrary, span, v.exporter.logger) if err != nil { @@ -58,7 +59,7 @@ func (v *traceVisitor) visit( return true } -func (exporter *traceExporter) onTraceData(context context.Context, traceData pdata.Traces) error { +func (exporter *traceExporter) onTraceData(context context.Context, traceData ptrace.Traces) error { spanCount := traceData.SpanCount() if spanCount == 0 { return nil diff --git a/exporter/azuremonitorexporter/traceexporter_test.go b/exporter/azuremonitorexporter/traceexporter_test.go index e5c1e773dd04..1e1a618b394e 100644 --- a/exporter/azuremonitorexporter/traceexporter_test.go +++ b/exporter/azuremonitorexporter/traceexporter_test.go @@ -20,8 +20,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "go.opentelemetry.io/collector/consumer/consumererror" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap" "golang.org/x/net/context" ) @@ -35,7 +35,7 @@ func TestExporterTraceDataCallbackNoSpans(t *testing.T) { mockTransportChannel := getMockTransportChannel() exporter := getExporter(defaultConfig, mockTransportChannel) - traces := pdata.NewTraces() + traces := ptrace.NewTraces() assert.NoError(t, exporter.onTraceData(context.Background(), traces)) @@ -52,7 +52,7 @@ func TestExporterTraceDataCallbackSingleSpan(t *testing.T) { instrumentationLibrary := getInstrumentationLibrary() span := getDefaultHTTPServerSpan() - traces := pdata.NewTraces() + traces := ptrace.NewTraces() rs := traces.ResourceSpans().AppendEmpty() r := rs.Resource() resource.CopyTo(r) @@ -79,7 +79,7 @@ func TestExporterTraceDataCallbackSingleSpanNoEnvelope(t *testing.T) { // of them is currently not supported. span.Attributes().InsertString(conventions.AttributeFaaSTrigger, "http") - traces := pdata.NewTraces() + traces := ptrace.NewTraces() rs := traces.ResourceSpans().AppendEmpty() r := rs.Resource() resource.CopyTo(r) diff --git a/exporter/azuremonitorexporter/traceiteration.go b/exporter/azuremonitorexporter/traceiteration.go index c2314c3dd1ff..c61aff0b7723 100644 --- a/exporter/azuremonitorexporter/traceiteration.go +++ b/exporter/azuremonitorexporter/traceiteration.go @@ -14,10 +14,13 @@ package azuremonitorexporter // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/azuremonitorexporter" -import "go.opentelemetry.io/collector/model/pdata" +import ( + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" +) /* - Encapsulates iteration over the Spans inside pdata.Traces from the underlying representation. + Encapsulates iteration over the Spans inside ptrace.Traces from the underlying representation. Everyone is doing the same kind of iteration and checking over a set traces. 
*/ @@ -25,11 +28,11 @@ import "go.opentelemetry.io/collector/model/pdata" type TraceVisitor interface { // Called for each tuple of Resource, InstrumentationLibrary, and Span // If Visit returns false, the iteration is short-circuited - visit(resource pdata.Resource, instrumentationLibrary pdata.InstrumentationScope, span pdata.Span) (ok bool) + visit(resource pcommon.Resource, instrumentationLibrary pcommon.InstrumentationScope, span ptrace.Span) (ok bool) } // Accept method is called to start the iteration process -func Accept(traces pdata.Traces, v TraceVisitor) { +func Accept(traces ptrace.Traces, v TraceVisitor) { resourceSpans := traces.ResourceSpans() // Walk each ResourceSpans instance diff --git a/exporter/azuremonitorexporter/traceiteration_test.go b/exporter/azuremonitorexporter/traceiteration_test.go index 7a49e0cfd98d..d8c03f5c22b4 100644 --- a/exporter/azuremonitorexporter/traceiteration_test.go +++ b/exporter/azuremonitorexporter/traceiteration_test.go @@ -18,21 +18,22 @@ import ( "testing" "github.com/stretchr/testify/mock" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" ) type mockVisitor struct { mock.Mock } -func (v *mockVisitor) visit(resource pdata.Resource, instrumentationLibrary pdata.InstrumentationScope, span pdata.Span) (ok bool) { +func (v *mockVisitor) visit(resource pcommon.Resource, instrumentationLibrary pcommon.InstrumentationScope, span ptrace.Span) (ok bool) { args := v.Called(resource, instrumentationLibrary, span) return args.Bool(0) } -// Tests the iteration logic over a pdata.Traces type when no ResourceSpans are provided +// Tests the iteration logic over a ptrace.Traces type when no ResourceSpans are provided func TestTraceDataIterationNoResourceSpans(t *testing.T) { - traces := pdata.NewTraces() + traces := ptrace.NewTraces() visitor := getMockVisitor(true) @@ -41,9 +42,9 @@ func TestTraceDataIterationNoResourceSpans(t *testing.T) { visitor.AssertNumberOfCalls(t, "visit", 0) } -// Tests the iteration logic over a pdata.Traces type when a ResourceSpans is nil +// Tests the iteration logic over a ptrace.Traces type when a ResourceSpans is nil func TestTraceDataIterationResourceSpansIsEmpty(t *testing.T) { - traces := pdata.NewTraces() + traces := ptrace.NewTraces() traces.ResourceSpans().AppendEmpty() visitor := getMockVisitor(true) @@ -53,9 +54,9 @@ func TestTraceDataIterationResourceSpansIsEmpty(t *testing.T) { visitor.AssertNumberOfCalls(t, "visit", 0) } -// Tests the iteration logic over a pdata.Traces type when ScopeSpans is nil +// Tests the iteration logic over a ptrace.Traces type when ScopeSpans is nil func TestTraceDataIterationScopeSpansIsEmpty(t *testing.T) { - traces := pdata.NewTraces() + traces := ptrace.NewTraces() rs := traces.ResourceSpans().AppendEmpty() rs.ScopeSpans().AppendEmpty() @@ -66,9 +67,9 @@ func TestTraceDataIterationScopeSpansIsEmpty(t *testing.T) { visitor.AssertNumberOfCalls(t, "visit", 0) } -// Tests the iteration logic over a pdata.Traces type when there are no Spans +// Tests the iteration logic over a ptrace.Traces type when there are no Spans func TestTraceDataIterationNoSpans(t *testing.T) { - traces := pdata.NewTraces() + traces := ptrace.NewTraces() rs := traces.ResourceSpans().AppendEmpty() rs.ScopeSpans().AppendEmpty() @@ -81,7 +82,7 @@ func TestTraceDataIterationNoSpans(t *testing.T) { // Tests the iteration logic if the visitor returns true func TestTraceDataIterationNoShortCircuit(t *testing.T) { - traces 
:= pdata.NewTraces() + traces := ptrace.NewTraces() rs := traces.ResourceSpans().AppendEmpty() ilss := rs.ScopeSpans().AppendEmpty() ilss.Spans().AppendEmpty() @@ -96,7 +97,7 @@ func TestTraceDataIterationNoShortCircuit(t *testing.T) { // Tests the iteration logic short circuit if the visitor returns false func TestTraceDataIterationShortCircuit(t *testing.T) { - traces := pdata.NewTraces() + traces := ptrace.NewTraces() rs := traces.ResourceSpans().AppendEmpty() ilss := rs.ScopeSpans().AppendEmpty() ilss.Spans().AppendEmpty() diff --git a/exporter/carbonexporter/exporter.go b/exporter/carbonexporter/exporter.go index 2bf3bf86167f..133bcbfbbbdd 100644 --- a/exporter/carbonexporter/exporter.go +++ b/exporter/carbonexporter/exporter.go @@ -24,7 +24,7 @@ import ( agentmetricspb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/exporter/exporterhelper" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" internaldata "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus" ) @@ -60,7 +60,7 @@ type carbonSender struct { connPool *connPool } -func (cs *carbonSender) pushMetricsData(_ context.Context, md pdata.Metrics) error { +func (cs *carbonSender) pushMetricsData(_ context.Context, md pmetric.Metrics) error { rms := md.ResourceMetrics() mds := make([]*agentmetricspb.ExportMetricsServiceRequest, 0, rms.Len()) for i := 0; i < rms.Len(); i++ { diff --git a/exporter/carbonexporter/exporter_test.go b/exporter/carbonexporter/exporter_test.go index 551ea029bc01..52d432137f2c 100644 --- a/exporter/carbonexporter/exporter_test.go +++ b/exporter/carbonexporter/exporter_test.go @@ -33,7 +33,7 @@ import ( "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/config" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "google.golang.org/protobuf/types/known/timestamppb" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/testutil" @@ -99,7 +99,7 @@ func TestConsumeMetricsData(t *testing.T) { tests := []struct { name string - md pdata.Metrics + md pmetric.Metrics acceptClient bool createServer bool }{ @@ -280,7 +280,7 @@ func Test_connPool_Concurrency(t *testing.T) { recvWG.Wait() } -func generateLargeBatch() pdata.Metrics { +func generateLargeBatch() pmetric.Metrics { var metrics []*metricspb.Metric ts := time.Now() for i := 0; i < 65000; i++ { diff --git a/exporter/carbonexporter/go.mod b/exporter/carbonexporter/go.mod index 52cd08e9d7bd..a7cc6203ee12 100644 --- a/exporter/carbonexporter/go.mod +++ b/exporter/carbonexporter/go.mod @@ -8,34 +8,34 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.48.0 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus v0.48.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 google.golang.org/protobuf v1.28.0 ) require ( - github.com/cenkalti/backoff/v4 v4.1.2 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/gogo/protobuf v1.3.2 
// indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.2 // indirect github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opencensus.io v0.23.0 // indirect + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect go.uber.org/zap v1.21.0 // indirect - golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect + golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 // indirect golang.org/x/text v0.3.7 // indirect google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect google.golang.org/grpc v1.45.0 // indirect @@ -48,3 +48,5 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/common => ../../internal/common replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal => ../../internal/coreinternal + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/exporter/carbonexporter/go.sum b/exporter/carbonexporter/go.sum index 784d52c3fb9d..f906e9a47221 100644 --- a/exporter/carbonexporter/go.sum +++ b/exporter/carbonexporter/go.sum @@ -18,8 +18,8 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= -github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.3.0 h1:t/LhUZLVitR1Ow2YOnduCsavhwFUklBMoGVYUCqmCqk= github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -92,7 +92,6 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/uuid 
v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -124,8 +123,8 @@ github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqx github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -168,8 +167,6 @@ github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -185,17 +182,19 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod 
h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= @@ -232,8 +231,8 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -257,8 +256,8 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text 
v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= diff --git a/exporter/clickhouseexporter/exporter.go b/exporter/clickhouseexporter/exporter.go index 284a43991f5c..e5fa6a00dec5 100644 --- a/exporter/clickhouseexporter/exporter.go +++ b/exporter/clickhouseexporter/exporter.go @@ -22,7 +22,8 @@ import ( "time" _ "github.com/ClickHouse/clickhouse-go" // For register database driver. - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" "go.uber.org/zap" ) @@ -62,7 +63,7 @@ func (e *clickhouseExporter) Shutdown(_ context.Context) error { return nil } -func (e *clickhouseExporter) pushLogsData(ctx context.Context, ld pdata.Logs) error { +func (e *clickhouseExporter) pushLogsData(ctx context.Context, ld plog.Logs) error { start := time.Now() err := doWithTx(ctx, e.client, func(tx *sql.Tx) error { statement, err := tx.PrepareContext(ctx, e.insertLogsSQL) @@ -108,10 +109,10 @@ func (e *clickhouseExporter) pushLogsData(ctx context.Context, ld pdata.Logs) er return err } -func attributesToSlice(attributes pdata.Map) ([]string, []string) { +func attributesToSlice(attributes pcommon.Map) ([]string, []string) { keys := make([]string, 0, attributes.Len()) values := make([]string, 0, attributes.Len()) - attributes.Range(func(k string, v pdata.Value) bool { + attributes.Range(func(k string, v pcommon.Value) bool { keys = append(keys, formatKey(k)) values = append(values, v.AsString()) return true diff --git a/exporter/clickhouseexporter/exporter_test.go b/exporter/clickhouseexporter/exporter_test.go index 3ccd2b4b9374..b94db04ac937 100644 --- a/exporter/clickhouseexporter/exporter_test.go +++ b/exporter/clickhouseexporter/exporter_test.go @@ -24,7 +24,8 @@ import ( "time" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" "go.uber.org/zap" "go.uber.org/zap/zaptest" ) @@ -122,19 +123,19 @@ func withTestExporterConfig(fns ...func(*Config)) func(string) *Config { } } -func simpleLogs(count int) pdata.Logs { - logs := pdata.NewLogs() +func simpleLogs(count int) plog.Logs { + logs := plog.NewLogs() rl := logs.ResourceLogs().AppendEmpty() sl := rl.ScopeLogs().AppendEmpty() for i := 0; i < count; i++ { r := sl.LogRecords().AppendEmpty() - r.SetTimestamp(pdata.NewTimestampFromTime(time.Now())) + r.SetTimestamp(pcommon.NewTimestampFromTime(time.Now())) r.Attributes().InsertString("k", "v") } return logs } -func mustPushLogsData(t *testing.T, exporter *clickhouseExporter, ld pdata.Logs) { +func mustPushLogsData(t *testing.T, exporter *clickhouseExporter, ld plog.Logs) { err := exporter.pushLogsData(context.TODO(), ld) require.NoError(t, err) } diff --git a/exporter/clickhouseexporter/go.mod b/exporter/clickhouseexporter/go.mod index 723a136528ac..b7576a1710c5 100644 --- a/exporter/clickhouseexporter/go.mod +++ b/exporter/clickhouseexporter/go.mod @@ -3,8 +3,7 @@ module github.com/open-telemetry/opentelemetry-collector-contrib/exporter/clickh go 1.17 require ( - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d go.uber.org/zap v1.21.0 ) @@ -14,17 +13,17 @@ require go.uber.org/multierr v1.8.0 require ( github.com/ClickHouse/clickhouse-go v1.5.4 github.com/stretchr/testify v1.7.1 + go.opentelemetry.io/collector/pdata 
v0.0.0-00010101000000-000000000000 ) require ( github.com/benbjohnson/clock v1.3.0 // indirect - github.com/cenkalti/backoff/v4 v4.1.2 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/fsnotify/fsnotify v1.5.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/protobuf v1.5.2 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/pretty v0.3.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect @@ -32,19 +31,14 @@ require ( github.com/pelletier/go-toml v1.9.4 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect - golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect - golang.org/x/text v0.3.7 // indirect - google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect - google.golang.org/grpc v1.45.0 // indirect - google.golang.org/protobuf v1.28.0 // indirect gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/exporter/clickhouseexporter/go.sum b/exporter/clickhouseexporter/go.sum index ae5d0b63cf25..a0b16d70050d 100644 --- a/exporter/clickhouseexporter/go.sum +++ b/exporter/clickhouseexporter/go.sum @@ -1,10 +1,7 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/ClickHouse/clickhouse-go v1.5.4 h1:cKjXeYLNWVJIx2J1K6H2CqyRmfwVJVY1OV1coaaFcI0= github.com/ClickHouse/clickhouse-go v1.5.4/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= @@ -23,21 +20,13 @@ github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZx github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bkaradzic/go-lz4 v1.0.0 h1:RXc4wYsyz985CkXXeX04y4VnZFGG8Rd43pRaHsOXAKk= github.com/bkaradzic/go-lz4 v1.0.0/go.mod h1:0YdlkowM3VswSROI7qDxhRvJ3sLhlFrRRwjwegp5jy4= 
-github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= -github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58 h1:F1EaeKL/ta07PY/k9Os/UFtwERei2/XzGemhpGnBKNg= github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58/go.mod h1:EOBUe0h4xcZ5GoxqC5SDxFQ8gwyZPKQoEzownBlhI80= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -45,9 +34,6 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= @@ -55,7 +41,6 @@ github.com/felixge/httpsnoop v1.0.2 h1:+nS9g82KMXccJ/wp0zyRW9ZBHFETmMGtkk+2CTTrW github.com/fsnotify/fsnotify v1.4.9/go.mod 
h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= @@ -73,18 +58,14 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -94,13 +75,10 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod 
h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -131,8 +109,8 @@ github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqx github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= @@ -176,21 +154,16 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rhnvrm/simples3 v0.6.1/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1 
h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= @@ -200,20 +173,19 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -237,20 +209,16 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -266,22 +234,18 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= 
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -302,22 +266,16 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -327,11 +285,7 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -339,8 +293,6 @@ gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogR gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= diff --git a/exporter/coralogixexporter/client.go b/exporter/coralogixexporter/client.go index ccaca945b68a..41bc1ade5a8a 100644 --- a/exporter/coralogixexporter/client.go +++ b/exporter/coralogixexporter/client.go @@ -20,7 +20,7 @@ import ( cxpb "github.com/coralogix/opentelemetry-cx-protobuf-api/coralogixpb" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/metadata" @@ -60,7 +60,7 @@ func (c *coralogixClient) startConnection(ctx context.Context, host component.Ho return nil } -func (c *coralogixClient) newPost(ctx context.Context, td pdata.Traces) error { +func (c *coralogixClient) newPost(ctx context.Context, td ptrace.Traces) error { batches, err := jaeger.ProtoFromTraces(td) if err != nil { return fmt.Errorf("can't translate to jaeger proto: %w", err) diff --git a/exporter/coralogixexporter/config_test.go b/exporter/coralogixexporter/config_test.go index cbd1df8919a8..a2f95acaee49 100644 --- a/exporter/coralogixexporter/config_test.go +++ b/exporter/coralogixexporter/config_test.go @@ -26,7 +26,7 @@ import ( "go.opentelemetry.io/collector/config/configgrpc" "go.opentelemetry.io/collector/config/configtls" "go.opentelemetry.io/collector/exporter/exporterhelper" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/ptrace" "go.opentelemetry.io/collector/service/servicetest" ) @@ -77,7 +77,7 @@ func TestExporter(t *testing.T) { te := newCoralogixExporter(apiConfig, params) te.client.startConnection(context.Background(), componenttest.NewNopHost()) assert.NotNil(t, te, "failed to create trace exporter") - td := pdata.NewTraces() + td := ptrace.NewTraces() err := te.tracesPusher(context.Background(), td) assert.Nil(t, err) } diff --git a/exporter/coralogixexporter/exporter.go b/exporter/coralogixexporter/exporter.go index b9d1b835490d..ee62a8c1274d 100644 --- a/exporter/coralogixexporter/exporter.go +++ b/exporter/coralogixexporter/exporter.go @@ -18,7 +18,7 @@ import ( "context" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap" ) @@ -38,6 +38,6 @@ func newCoralogixExporter(cfg *Config, params component.ExporterCreateSettings) } } -func (cx *coralogixExporter) tracesPusher(ctx context.Context, td pdata.Traces) error { +func (cx *coralogixExporter) tracesPusher(ctx context.Context, td ptrace.Traces) error { return cx.client.newPost(ctx, td) } diff --git a/exporter/coralogixexporter/go.mod 
b/exporter/coralogixexporter/go.mod index 48e9d76627fc..0199a1ffc1b1 100644 --- a/exporter/coralogixexporter/go.mod +++ b/exporter/coralogixexporter/go.mod @@ -4,20 +4,20 @@ go 1.17 require ( github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d ) require ( github.com/coralogix/opentelemetry-cx-protobuf-api/coralogixpb v0.0.0-20211201100428-d2a5d0ecf53e github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/zap v1.21.0 google.golang.org/grpc v1.45.0 ) require ( github.com/apache/thrift v0.16.0 // indirect - github.com/cenkalti/backoff/v4 v4.1.2 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/go-logr/logr v1.2.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect @@ -27,7 +27,7 @@ require ( github.com/golang/snappy v0.0.4 // indirect github.com/jaegertracing/jaeger v1.32.0 // indirect github.com/klauspost/compress v1.15.1 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect @@ -35,18 +35,18 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.48.0 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spf13/cast v1.4.1 // indirect github.com/uber/jaeger-client-go v2.30.0+incompatible // indirect github.com/uber/jaeger-lib v2.4.1+incompatible // indirect go.opencensus.io v0.23.0 // indirect + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect - golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect + golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 // indirect golang.org/x/text v0.3.7 // indirect google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect google.golang.org/protobuf v1.28.0 // indirect @@ -57,3 +57,5 @@ require ( replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal => ../../internal/coreinternal replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger => ../../pkg/translator/jaeger + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/exporter/coralogixexporter/go.sum b/exporter/coralogixexporter/go.sum index 74b3293d6948..bc1dab821bde 100644 --- a/exporter/coralogixexporter/go.sum +++ b/exporter/coralogixexporter/go.sum @@ -23,8 +23,8 @@ github.com/aws/smithy-go v1.8.0/go.mod 
h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= -github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -104,7 +104,6 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -140,8 +139,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/klauspost/compress v1.14.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= @@ -197,8 +196,6 @@ github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFo github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/schollz/progressbar/v2 v2.13.2/go.mod h1:6YZjqdthH6SCZKv2rqGryrxPtfmRB/DWZxSMfCXPyD8= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 
h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -219,10 +216,12 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0/go.mod h1:SY9qHHUES6W3oZnO1H2W8NvsSovIoXRg/A1AH9px8+I= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= @@ -231,7 +230,7 @@ go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.1/go.mod h1:RkFRM1m0puWIq10oxImnGEduNBzxiN7TXluRBtE+5j0= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= @@ -270,8 +269,8 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= 
+golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -297,8 +296,8 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= diff --git a/exporter/datadogexporter/factory.go b/exporter/datadogexporter/factory.go index 902c7d5e9f42..72c95308a5df 100644 --- a/exporter/datadogexporter/factory.go +++ b/exporter/datadogexporter/factory.go @@ -25,7 +25,9 @@ import ( "go.opentelemetry.io/collector/config/confignet" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/exporter/exporterhelper" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" ddconfig "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/config" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/metadata" @@ -136,10 +138,10 @@ func (f *factory) createMetricsExporter( var pushMetricsFn consumer.ConsumeMetricsFunc if cfg.OnlyMetadata { - pushMetricsFn = func(_ context.Context, md pdata.Metrics) error { + pushMetricsFn = func(_ context.Context, md pmetric.Metrics) error { // only sending metadata use only metrics f.onceMetadata.Do(func() { - attrs := pdata.NewMap() + attrs := pcommon.NewMap() if md.ResourceMetrics().Len() > 0 { attrs = md.ResourceMetrics().At(0).Resource().Attributes() } @@ -196,10 +198,10 @@ func (f *factory) createTracesExporter( var pushTracesFn consumer.ConsumeTracesFunc if cfg.OnlyMetadata { - pushTracesFn = func(_ context.Context, td pdata.Traces) error { + pushTracesFn = func(_ context.Context, td ptrace.Traces) error { // only sending metadata, use only attributes f.onceMetadata.Do(func() { - attrs := pdata.NewMap() + attrs := pcommon.NewMap() if td.ResourceSpans().Len() > 0 { attrs = td.ResourceSpans().At(0).Resource().Attributes() } diff --git a/exporter/datadogexporter/go.mod b/exporter/datadogexporter/go.mod index 501148d2c2c4..1fd39cddf539 100644 
--- a/exporter/datadogexporter/go.mod +++ b/exporter/datadogexporter/go.mod @@ -13,8 +13,9 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.48.0 github.com/patrickmn/go-cache v2.1.0+incompatible github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/multierr v1.8.0 go.uber.org/zap v1.21.0 gopkg.in/DataDog/dd-trace-go.v1 v1.37.1 @@ -34,9 +35,8 @@ require ( github.com/dustin/go-humanize v1.0.0 // indirect github.com/go-ole/go-ole v1.2.6 // indirect github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b // indirect - github.com/golang/protobuf v1.5.2 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect @@ -44,7 +44,6 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/shirou/gopsutil v2.20.9+incompatible // indirect - github.com/spf13/cast v1.4.1 // indirect github.com/tinylib/msgp v1.1.2 // indirect github.com/zorkian/go-datadog-api v2.30.0+incompatible // indirect go.opencensus.io v0.23.0 // indirect @@ -52,13 +51,8 @@ require ( go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect - golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9 // indirect - golang.org/x/text v0.3.7 // indirect golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11 // indirect - google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect - google.golang.org/grpc v1.45.0 // indirect - google.golang.org/protobuf v1.28.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) @@ -70,3 +64,5 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourceto // see https://github.com/go-chi/chi/issues/713 // see https://github.com/DataDog/dd-trace-go/issues/1220 replace github.com/go-chi/chi/v4 => github.com/go-chi/chi v4.0.0+incompatible + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/exporter/datadogexporter/go.sum b/exporter/datadogexporter/go.sum index 2747c64e2a4b..04a22e8f79c7 100644 --- a/exporter/datadogexporter/go.sum +++ b/exporter/datadogexporter/go.sum @@ -138,8 +138,6 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go 
v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= @@ -190,9 +188,6 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.8/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= @@ -367,11 +362,9 @@ github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsC github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.5.0/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/graph-gophers/graphql-go v1.3.0/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/grpc-gateway v1.15.2/go.mod h1:vO11I9oWA+KsxmfFQPhLnnIb1VDE24M+pdxZFiuZcA8= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= github.com/hashicorp/consul/api v1.0.0/go.mod h1:mbFwfRxOTDHZpT3iUsMAFcLNoVm6Xbe1xZ6KiSm8FY0= github.com/hashicorp/consul/internal v0.1.0/go.mod h1:zi9bMZYbiPHyAjgBWo7kCUcy5l2NrTdrkVupCc7Oo6c= @@ -528,8 +521,8 @@ github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8 github.com/klauspost/compress v1.14.2/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= github.com/klauspost/crc32 v1.2.0/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= -github.com/knadh/koanf v1.4.0 
h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= @@ -705,8 +698,6 @@ github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9/go.mod h github.com/smartystreets/gunit v1.1.3/go.mod h1:EH5qMBab2UclzXUcpR8b93eHsIlp9u+pDQIRp5DZNzQ= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= @@ -795,10 +786,12 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v0.11.0/go.mod h1:G8UCk+KooF2HLkgo8RHX9epABH/aRGYET7gQOqBVdB0= @@ -806,10 +799,9 @@ go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel 
v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= @@ -936,8 +928,8 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211020060615-d418f374d309/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1160,7 +1152,6 @@ google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEY google.golang.org/genproto v0.0.0-20200528110217-3d3490e7e671/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200726014623-da3ae01ef02d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= @@ -1176,12 +1167,8 @@ google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8 google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.45.0 
h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1196,7 +1183,6 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/DataDog/dd-trace-go.v1 v1.37.1 h1:rgO9oC3Mr7es0zKIsaooL50UY0qboUDdueP7MzH1fUI= gopkg.in/DataDog/dd-trace-go.v1 v1.37.1/go.mod h1:HMpV5TQ38YAfRJ8OlM7UfLyGg6D01MPNHRFwsnrGMB0= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= diff --git a/exporter/datadogexporter/internal/metadata/metadata.go b/exporter/datadogexporter/internal/metadata/metadata.go index e3687529ebf9..8e3d106be9a6 100644 --- a/exporter/datadogexporter/internal/metadata/metadata.go +++ b/exporter/datadogexporter/internal/metadata/metadata.go @@ -23,8 +23,8 @@ import ( "time" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/metadata/ec2" @@ -92,7 +92,7 @@ type Meta struct { // metadataFromAttributes gets metadata info from attributes following // OpenTelemetry semantic conventions -func metadataFromAttributes(attrs pdata.Map) *HostMetadata { +func metadataFromAttributes(attrs pcommon.Map) *HostMetadata { hm := &HostMetadata{Meta: &Meta{}, Tags: &HostTags{}} if hostname, ok := attributes.HostnameFromAttributes(attrs); ok { @@ -196,7 +196,7 @@ func pushMetadataWithRetry(retrier *utils.Retrier, params component.ExporterCrea } // Pusher pushes host metadata payloads periodically to Datadog intake -func Pusher(ctx context.Context, params component.ExporterCreateSettings, pcfg PusherConfig, attrs pdata.Map) { +func Pusher(ctx context.Context, params component.ExporterCreateSettings, pcfg PusherConfig, attrs pcommon.Map) { // Push metadata every 30 minutes ticker := time.NewTicker(30 * time.Minute) defer ticker.Stop() diff --git a/exporter/datadogexporter/internal/metrics/consumer_test.go b/exporter/datadogexporter/internal/metrics/consumer_test.go index d9610a73c245..6d2e3e0bb0e1 100644 --- a/exporter/datadogexporter/internal/metrics/consumer_test.go +++ b/exporter/datadogexporter/internal/metrics/consumer_test.go @@ -21,8 +21,9 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" 
"github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/model/attributes" @@ -47,20 +48,20 @@ func newTranslator(t *testing.T, logger *zap.Logger) *translator.Translator { } func TestRunningMetrics(t *testing.T) { - ms := pdata.NewMetrics() + ms := pmetric.NewMetrics() rms := ms.ResourceMetrics() rm := rms.AppendEmpty() resAttrs := rm.Resource().Attributes() - resAttrs.Insert(attributes.AttributeDatadogHostname, pdata.NewValueString("resource-hostname-1")) + resAttrs.Insert(attributes.AttributeDatadogHostname, pcommon.NewValueString("resource-hostname-1")) rm = rms.AppendEmpty() resAttrs = rm.Resource().Attributes() - resAttrs.Insert(attributes.AttributeDatadogHostname, pdata.NewValueString("resource-hostname-1")) + resAttrs.Insert(attributes.AttributeDatadogHostname, pcommon.NewValueString("resource-hostname-1")) rm = rms.AppendEmpty() resAttrs = rm.Resource().Attributes() - resAttrs.Insert(attributes.AttributeDatadogHostname, pdata.NewValueString("resource-hostname-2")) + resAttrs.Insert(attributes.AttributeDatadogHostname, pcommon.NewValueString("resource-hostname-2")) rms.AppendEmpty() @@ -86,7 +87,7 @@ func TestRunningMetrics(t *testing.T) { } func TestTagsMetrics(t *testing.T) { - ms := pdata.NewMetrics() + ms := pmetric.NewMetrics() rms := ms.ResourceMetrics() rm := rms.AppendEmpty() diff --git a/exporter/datadogexporter/internal/model/attributes/attributes.go b/exporter/datadogexporter/internal/model/attributes/attributes.go index 1a19b70809d6..f5834652e4ba 100644 --- a/exporter/datadogexporter/internal/model/attributes/attributes.go +++ b/exporter/datadogexporter/internal/model/attributes/attributes.go @@ -18,8 +18,8 @@ import ( "fmt" "strings" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" ) var ( @@ -118,13 +118,13 @@ var ( // TagsFromAttributes converts a selected list of attributes // to a tag list that can be added to metrics. -func TagsFromAttributes(attrs pdata.Map) []string { +func TagsFromAttributes(attrs pcommon.Map) []string { tags := make([]string, 0, attrs.Len()) var processAttributes processAttributes var systemAttributes systemAttributes - attrs.Range(func(key string, value pdata.Value) bool { + attrs.Range(func(key string, value pcommon.Value) bool { switch key { // Process attributes case conventions.AttributeProcessExecutableName: @@ -165,7 +165,7 @@ func TagsFromAttributes(attrs pdata.Map) []string { // OriginIDFromAttributes gets the origin IDs from resource attributes. // If not found, an empty string is returned for each of them. -func OriginIDFromAttributes(attrs pdata.Map) (originID string) { +func OriginIDFromAttributes(attrs pcommon.Map) (originID string) { // originID is always empty. Container ID is preferred over Kubernetes pod UID. // Prefixes come from pkg/util/kubernetes/kubelet and pkg/util/containers. if containerID, ok := attrs.Get(conventions.AttributeContainerID); ok { @@ -177,7 +177,7 @@ func OriginIDFromAttributes(attrs pdata.Map) (originID string) { } // RunningTagsFromAttributes gets tags used for running metrics from attributes. 
-func RunningTagsFromAttributes(attrs pdata.Map) []string { +func RunningTagsFromAttributes(attrs pcommon.Map) []string { tags := make([]string, 0, 1) for _, key := range runningTagsAttributes { if val, ok := attrs.Get(key); ok { diff --git a/exporter/datadogexporter/internal/model/attributes/attributes_test.go b/exporter/datadogexporter/internal/model/attributes/attributes_test.go index d270989d3a04..7a59a5da90d9 100644 --- a/exporter/datadogexporter/internal/model/attributes/attributes_test.go +++ b/exporter/datadogexporter/internal/model/attributes/attributes_test.go @@ -19,8 +19,8 @@ import ( "testing" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" ) func TestTagsFromAttributes(t *testing.T) { @@ -36,7 +36,7 @@ func TestTagsFromAttributes(t *testing.T) { conventions.AttributeAWSECSClusterARN: "cluster_arn", "tags.datadoghq.com/service": "service_name", } - attrs := pdata.NewMapFromRaw(attributeMap) + attrs := pcommon.NewMapFromRaw(attributeMap) assert.ElementsMatch(t, []string{ fmt.Sprintf("%s:%s", conventions.AttributeProcessExecutableName, "otelcol"), @@ -48,7 +48,7 @@ func TestTagsFromAttributes(t *testing.T) { } func TestTagsFromAttributesEmpty(t *testing.T) { - attrs := pdata.NewMap() + attrs := pcommon.NewMap() assert.Equal(t, []string{}, TagsFromAttributes(attrs)) } @@ -85,12 +85,12 @@ func TestContainerTagFromAttributesEmpty(t *testing.T) { func TestOriginIDFromAttributes(t *testing.T) { tests := []struct { name string - attrs pdata.Map + attrs pcommon.Map originID string }{ { name: "pod UID and container ID", - attrs: pdata.NewMapFromRaw(map[string]interface{}{ + attrs: pcommon.NewMapFromRaw(map[string]interface{}{ conventions.AttributeContainerID: "container_id_goes_here", conventions.AttributeK8SPodUID: "k8s_pod_uid_goes_here", }), @@ -98,21 +98,21 @@ func TestOriginIDFromAttributes(t *testing.T) { }, { name: "only container ID", - attrs: pdata.NewMapFromRaw(map[string]interface{}{ + attrs: pcommon.NewMapFromRaw(map[string]interface{}{ conventions.AttributeContainerID: "container_id_goes_here", }), originID: "container_id://container_id_goes_here", }, { name: "only pod UID", - attrs: pdata.NewMapFromRaw(map[string]interface{}{ + attrs: pcommon.NewMapFromRaw(map[string]interface{}{ conventions.AttributeK8SPodUID: "k8s_pod_uid_goes_here", }), originID: "kubernetes_pod_uid://k8s_pod_uid_goes_here", }, { name: "none", - attrs: pdata.NewMap(), + attrs: pcommon.NewMap(), }, } diff --git a/exporter/datadogexporter/internal/model/attributes/azure/azure.go b/exporter/datadogexporter/internal/model/attributes/azure/azure.go index b6545ad0d73a..2ac9c11e3571 100644 --- a/exporter/datadogexporter/internal/model/attributes/azure/azure.go +++ b/exporter/datadogexporter/internal/model/attributes/azure/azure.go @@ -17,8 +17,8 @@ package azure // import "github.com/open-telemetry/opentelemetry-collector-contr import ( "strings" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" ) const ( @@ -33,7 +33,7 @@ type HostInfo struct { // HostInfoFromAttributes gets Azure host info from attributes following // OpenTelemetry semantic conventions -func HostInfoFromAttributes(attrs pdata.Map) (hostInfo *HostInfo) { +func HostInfoFromAttributes(attrs pcommon.Map) (hostInfo *HostInfo) { hostInfo = &HostInfo{} // Add Azure VM ID as a host 
alias if available for compatibility with Azure integration @@ -45,7 +45,7 @@ func HostInfoFromAttributes(attrs pdata.Map) (hostInfo *HostInfo) { } // HostnameFromAttributes gets the Azure hostname from attributes -func HostnameFromAttributes(attrs pdata.Map) (string, bool) { +func HostnameFromAttributes(attrs pcommon.Map) (string, bool) { if hostname, ok := attrs.Get(conventions.AttributeHostName); ok { return hostname.StringVal(), true } @@ -54,7 +54,7 @@ func HostnameFromAttributes(attrs pdata.Map) (string, bool) { } // ClusterNameFromAttributes gets the Azure cluster name from attributes -func ClusterNameFromAttributes(attrs pdata.Map) (string, bool) { +func ClusterNameFromAttributes(attrs pcommon.Map) (string, bool) { // Get cluster name from resource group from pkg/util/cloudprovider/azure:GetClusterName if resourceGroup, ok := attrs.Get(AttributeResourceGroupName); ok { splitAll := strings.Split(resourceGroup.StringVal(), "_") diff --git a/exporter/datadogexporter/internal/model/attributes/ec2/ec2.go b/exporter/datadogexporter/internal/model/attributes/ec2/ec2.go index e636f852d596..4d5f39cc7127 100644 --- a/exporter/datadogexporter/internal/model/attributes/ec2/ec2.go +++ b/exporter/datadogexporter/internal/model/attributes/ec2/ec2.go @@ -18,8 +18,8 @@ import ( "fmt" "strings" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" ) var ( @@ -48,7 +48,7 @@ func isDefaultHostname(hostname string) bool { // HostnameFromAttributes gets a valid hostname from labels // if available -func HostnameFromAttributes(attrs pdata.Map) (string, bool) { +func HostnameFromAttributes(attrs pcommon.Map) (string, bool) { hostName, ok := attrs.Get(conventions.AttributeHostName) if ok && !isDefaultHostname(hostName.StringVal()) { return hostName.StringVal(), true @@ -63,7 +63,7 @@ func HostnameFromAttributes(attrs pdata.Map) (string, bool) { // HostInfoFromAttributes gets EC2 host info from attributes following // OpenTelemetry semantic conventions -func HostInfoFromAttributes(attrs pdata.Map) (hostInfo *HostInfo) { +func HostInfoFromAttributes(attrs pcommon.Map) (hostInfo *HostInfo) { hostInfo = &HostInfo{} if hostID, ok := attrs.Get(conventions.AttributeHostID); ok { @@ -74,7 +74,7 @@ func HostInfoFromAttributes(attrs pdata.Map) (hostInfo *HostInfo) { hostInfo.EC2Hostname = hostName.StringVal() } - attrs.Range(func(k string, v pdata.Value) bool { + attrs.Range(func(k string, v pcommon.Value) bool { if strings.HasPrefix(k, ec2TagPrefix) { tag := fmt.Sprintf("%s:%s", strings.TrimPrefix(k, ec2TagPrefix), v.StringVal()) hostInfo.EC2Tags = append(hostInfo.EC2Tags, tag) @@ -86,10 +86,10 @@ func HostInfoFromAttributes(attrs pdata.Map) (hostInfo *HostInfo) { } // ClusterNameFromAttributes gets the AWS cluster name from attributes -func ClusterNameFromAttributes(attrs pdata.Map) (clusterName string, ok bool) { +func ClusterNameFromAttributes(attrs pcommon.Map) (clusterName string, ok bool) { // Get cluster name from tag keys // https://github.com/DataDog/datadog-agent/blob/1c94b11/pkg/util/ec2/ec2.go#L238 - attrs.Range(func(k string, _ pdata.Value) bool { + attrs.Range(func(k string, _ pcommon.Value) bool { if strings.HasPrefix(k, clusterTagPrefix) { clusterName = strings.Split(k, "/")[2] ok = true diff --git a/exporter/datadogexporter/internal/model/attributes/gcp/gcp.go b/exporter/datadogexporter/internal/model/attributes/gcp/gcp.go index 460946a03f0b..36b17ba2b8e8 100644 --- 
a/exporter/datadogexporter/internal/model/attributes/gcp/gcp.go +++ b/exporter/datadogexporter/internal/model/attributes/gcp/gcp.go @@ -17,8 +17,8 @@ package gcp // import "github.com/open-telemetry/opentelemetry-collector-contrib import ( "fmt" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" ) // HostInfo holds the GCP host information. @@ -29,7 +29,7 @@ type HostInfo struct { // HostnameFromAttributes gets a valid hostname from labels // if available -func HostnameFromAttributes(attrs pdata.Map) (string, bool) { +func HostnameFromAttributes(attrs pcommon.Map) (string, bool) { if hostName, ok := attrs.Get(conventions.AttributeHostName); ok { return hostName.StringVal(), true } @@ -39,7 +39,7 @@ func HostnameFromAttributes(attrs pdata.Map) (string, bool) { // HostInfoFromAttributes gets GCP host info from attributes following // OpenTelemetry semantic conventions -func HostInfoFromAttributes(attrs pdata.Map) (hostInfo *HostInfo) { +func HostInfoFromAttributes(attrs pcommon.Map) (hostInfo *HostInfo) { hostInfo = &HostInfo{} if hostID, ok := attrs.Get(conventions.AttributeHostID); ok { diff --git a/exporter/datadogexporter/internal/model/attributes/hostname.go b/exporter/datadogexporter/internal/model/attributes/hostname.go index 4188a09d4857..1872f05c3422 100644 --- a/exporter/datadogexporter/internal/model/attributes/hostname.go +++ b/exporter/datadogexporter/internal/model/attributes/hostname.go @@ -15,8 +15,8 @@ package attributes // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/model/attributes" import ( - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/model/attributes/azure" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/model/attributes/ec2" @@ -30,7 +30,7 @@ const ( AttributeK8sNodeName = "k8s.node.name" ) -func getClusterName(attrs pdata.Map) (string, bool) { +func getClusterName(attrs pcommon.Map) (string, bool) { if k8sClusterName, ok := attrs.Get(conventions.AttributeK8SClusterName); ok { return k8sClusterName.StringVal(), true } @@ -55,7 +55,7 @@ func getClusterName(attrs pdata.Map) (string, bool) { // 6. the host.name attribute. // // It returns a boolean value indicated if any name was found -func HostnameFromAttributes(attrs pdata.Map) (string, bool) { +func HostnameFromAttributes(attrs pcommon.Map) (string, bool) { // Check if the host is localhost or 0.0.0.0, if so discard it. // We don't do the more strict validation done for metadata, // to avoid breaking users existing invalid-but-accepted hostnames. 
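For orientation only, a minimal sketch of how the hostname helpers touched above are called once attribute maps move from pdata.Map to pcommon.Map. This snippet is not part of the patch: the attributes package is internal to the datadog exporter, and the import path below simply mirrors the one used in this diff; only APIs already exercised in the diff (pcommon.NewMapFromRaw, HostnameFromAttributes's (string, bool) return) are assumed.

package main

import (
	"fmt"

	conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1"
	"go.opentelemetry.io/collector/pdata/pcommon"

	"github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/model/attributes"
)

func main() {
	// Resource attributes are now built with pcommon.NewMapFromRaw, the same
	// constructor the updated tests in this diff use.
	attrs := pcommon.NewMapFromRaw(map[string]interface{}{
		conventions.AttributeHostName: "my-host",
	})

	// Only the parameter type changed from pdata.Map to pcommon.Map; the
	// (string, bool) return shape stays the same.
	if hostname, ok := attributes.HostnameFromAttributes(attrs); ok {
		fmt.Println("resolved hostname:", hostname)
	}
}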
@@ -75,7 +75,7 @@ func HostnameFromAttributes(attrs pdata.Map) (string, bool) { return candidateHost, ok } -func unsanitizedHostnameFromAttributes(attrs pdata.Map) (string, bool) { +func unsanitizedHostnameFromAttributes(attrs pcommon.Map) (string, bool) { // Custom hostname: useful for overriding in k8s/cloud envs if customHostname, ok := attrs.Get(AttributeDatadogHostname); ok { return customHostname.StringVal(), true diff --git a/exporter/datadogexporter/internal/model/internal/instrumentationlibrary/metadata.go b/exporter/datadogexporter/internal/model/internal/instrumentationlibrary/metadata.go index 6622dd1957ba..bae1e8893b03 100644 --- a/exporter/datadogexporter/internal/model/internal/instrumentationlibrary/metadata.go +++ b/exporter/datadogexporter/internal/model/internal/instrumentationlibrary/metadata.go @@ -15,7 +15,7 @@ package instrumentationlibrary // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/model/internal/instrumentationlibrary" import ( - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/model/internal/utils" ) @@ -27,7 +27,7 @@ const ( // TagsFromInstrumentationLibraryMetadata takes the name and version of // the instrumentation library and converts them to Datadog tags. -func TagsFromInstrumentationLibraryMetadata(il pdata.InstrumentationScope) []string { +func TagsFromInstrumentationLibraryMetadata(il pcommon.InstrumentationScope) []string { return []string{ utils.FormatKeyValueTag(instrumentationLibraryTag, il.Name()), utils.FormatKeyValueTag(instrumentationLibraryVersionTag, il.Version()), diff --git a/exporter/datadogexporter/internal/model/internal/instrumentationlibrary/metadata_test.go b/exporter/datadogexporter/internal/model/internal/instrumentationlibrary/metadata_test.go index 8f22041016d2..b25cf1071c0b 100644 --- a/exporter/datadogexporter/internal/model/internal/instrumentationlibrary/metadata_test.go +++ b/exporter/datadogexporter/internal/model/internal/instrumentationlibrary/metadata_test.go @@ -19,7 +19,7 @@ import ( "testing" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" ) func TestTagsFromInstrumentationLibraryMetadata(t *testing.T) { @@ -35,7 +35,7 @@ func TestTagsFromInstrumentationLibraryMetadata(t *testing.T) { } for _, testInstance := range tests { - il := pdata.NewInstrumentationScope() + il := pcommon.NewInstrumentationScope() il.SetName(testInstance.name) il.SetVersion(testInstance.version) tags := TagsFromInstrumentationLibraryMetadata(il) diff --git a/exporter/datadogexporter/internal/model/internal/testutils/test_utils.go b/exporter/datadogexporter/internal/model/internal/testutils/test_utils.go index 31fba0a6eabe..e6e5569d858b 100644 --- a/exporter/datadogexporter/internal/model/internal/testutils/test_utils.go +++ b/exporter/datadogexporter/internal/model/internal/testutils/test_utils.go @@ -14,20 +14,20 @@ package testutils // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/model/internal/testutils" -import "go.opentelemetry.io/collector/model/pdata" +import "go.opentelemetry.io/collector/pdata/pcommon" -func fillAttributeMap(attrs pdata.Map, mp map[string]string) { +func fillAttributeMap(attrs pcommon.Map, mp map[string]string) { attrs.Clear() attrs.EnsureCapacity(len(mp)) for k, v 
:= range mp { - attrs.Insert(k, pdata.NewValueString(v)) + attrs.Insert(k, pcommon.NewValueString(v)) } } // NewAttributeMap creates a new attribute map (string only) // from a Go map -func NewAttributeMap(mp map[string]string) pdata.Map { - attrs := pdata.NewMap() +func NewAttributeMap(mp map[string]string) pcommon.Map { + attrs := pcommon.NewMap() fillAttributeMap(attrs, mp) return attrs } diff --git a/exporter/datadogexporter/internal/model/translator/dimensions.go b/exporter/datadogexporter/internal/model/translator/dimensions.go index 2646ea6a7c68..a79b50d2f707 100644 --- a/exporter/datadogexporter/internal/model/translator/dimensions.go +++ b/exporter/datadogexporter/internal/model/translator/dimensions.go @@ -19,7 +19,7 @@ import ( "sort" "strings" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/model/internal/utils" ) @@ -58,9 +58,9 @@ func (d *Dimensions) OriginID() string { } // getTags maps an attributeMap into a slice of Datadog tags -func getTags(labels pdata.Map) []string { +func getTags(labels pcommon.Map) []string { tags := make([]string, 0, labels.Len()) - labels.Range(func(key string, value pdata.Value) bool { + labels.Range(func(key string, value pcommon.Value) bool { v := value.AsString() tags = append(tags, utils.FormatKeyValueTag(key, v)) return true @@ -83,7 +83,7 @@ func (d *Dimensions) AddTags(tags ...string) *Dimensions { } // WithAttributeMap creates a new metricDimensions struct with additional tags from attributes. -func (d *Dimensions) WithAttributeMap(labels pdata.Map) *Dimensions { +func (d *Dimensions) WithAttributeMap(labels pcommon.Map) *Dimensions { return d.AddTags(getTags(labels)...) } diff --git a/exporter/datadogexporter/internal/model/translator/dimensions_test.go b/exporter/datadogexporter/internal/model/translator/dimensions_test.go index fe0a396f5f4c..fe74b54b9a12 100644 --- a/exporter/datadogexporter/internal/model/translator/dimensions_test.go +++ b/exporter/datadogexporter/internal/model/translator/dimensions_test.go @@ -18,11 +18,11 @@ import ( "testing" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" ) func TestWithAttributeMap(t *testing.T) { - attributes := pdata.NewMapFromRaw(map[string]interface{}{ + attributes := pcommon.NewMapFromRaw(map[string]interface{}{ "key1": "val1", "key2": "val2", "key3": "", @@ -110,7 +110,7 @@ func TestAllFieldsAreCopied(t *testing.T) { newDims := dims. AddTags("tagThree:c"). WithSuffix("suffix"). 
- WithAttributeMap(pdata.NewMapFromRaw(map[string]interface{}{ + WithAttributeMap(pcommon.NewMapFromRaw(map[string]interface{}{ "tagFour": "d", })) diff --git a/exporter/datadogexporter/internal/model/translator/metrics_translator.go b/exporter/datadogexporter/internal/model/translator/metrics_translator.go index 94fd661f844c..0b28de1766f9 100644 --- a/exporter/datadogexporter/internal/model/translator/metrics_translator.go +++ b/exporter/datadogexporter/internal/model/translator/metrics_translator.go @@ -21,7 +21,7 @@ import ( "strconv" "github.com/DataDog/datadog-agent/pkg/quantile" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/model/attributes" @@ -67,10 +67,10 @@ func New(logger *zap.Logger, options ...Option) (*Translator, error) { } // isCumulativeMonotonic checks if a metric is a cumulative monotonic metric -func isCumulativeMonotonic(md pdata.Metric) bool { +func isCumulativeMonotonic(md pmetric.Metric) bool { switch md.DataType() { - case pdata.MetricDataTypeSum: - return md.Sum().AggregationTemporality() == pdata.MetricAggregationTemporalityCumulative && + case pmetric.MetricDataTypeSum: + return md.Sum().AggregationTemporality() == pmetric.MetricAggregationTemporalityCumulative && md.Sum().IsMonotonic() } return false @@ -92,7 +92,7 @@ func (t *Translator) mapNumberMetrics( consumer TimeSeriesConsumer, dims *Dimensions, dt MetricDataType, - slice pdata.NumberDataPointSlice, + slice pmetric.NumberDataPointSlice, ) { for i := 0; i < slice.Len(); i++ { @@ -100,9 +100,9 @@ func (t *Translator) mapNumberMetrics( pointDims := dims.WithAttributeMap(p.Attributes()) var val float64 switch p.ValueType() { - case pdata.MetricValueTypeDouble: + case pmetric.MetricValueTypeDouble: val = p.DoubleVal() - case pdata.MetricValueTypeInt: + case pmetric.MetricValueTypeInt: val = float64(p.IntVal()) } @@ -119,7 +119,7 @@ func (t *Translator) mapNumberMonotonicMetrics( ctx context.Context, consumer TimeSeriesConsumer, dims *Dimensions, - slice pdata.NumberDataPointSlice, + slice pmetric.NumberDataPointSlice, ) { for i := 0; i < slice.Len(); i++ { p := slice.At(i) @@ -129,9 +129,9 @@ func (t *Translator) mapNumberMonotonicMetrics( var val float64 switch p.ValueType() { - case pdata.MetricValueTypeDouble: + case pmetric.MetricValueTypeDouble: val = p.DoubleVal() - case pdata.MetricValueTypeInt: + case pmetric.MetricValueTypeInt: val = float64(p.IntVal()) } @@ -145,7 +145,7 @@ func (t *Translator) mapNumberMonotonicMetrics( } } -func getBounds(p pdata.HistogramDataPoint, idx int) (lowerBound float64, upperBound float64) { +func getBounds(p pmetric.HistogramDataPoint, idx int) (lowerBound float64, upperBound float64) { // See https://github.com/open-telemetry/opentelemetry-proto/blob/v0.10.0/opentelemetry/proto/metrics/v1/metrics.proto#L427-L439 lowerBound = math.Inf(-1) upperBound = math.Inf(1) @@ -171,7 +171,7 @@ func (t *Translator) getSketchBuckets( ctx context.Context, consumer SketchConsumer, pointDims *Dimensions, - p pdata.HistogramDataPoint, + p pmetric.HistogramDataPoint, histInfo histogramInfo, delta bool, ) { @@ -223,7 +223,7 @@ func (t *Translator) getLegacyBuckets( ctx context.Context, consumer TimeSeriesConsumer, pointDims *Dimensions, - p pdata.HistogramDataPoint, + p pmetric.HistogramDataPoint, delta bool, ) { startTs := uint64(p.StartTimestamp()) @@ -264,7 +264,7 @@ func (t *Translator) 
mapHistogramMetrics( ctx context.Context, consumer Consumer, dims *Dimensions, - slice pdata.HistogramDataPointSlice, + slice pmetric.HistogramDataPointSlice, delta bool, ) { for i := 0; i < slice.Len(); i++ { @@ -344,7 +344,7 @@ func (t *Translator) mapSummaryMetrics( ctx context.Context, consumer TimeSeriesConsumer, dims *Dimensions, - slice pdata.SummaryDataPointSlice, + slice pmetric.SummaryDataPointSlice, ) { for i := 0; i < slice.Len(); i++ { @@ -388,7 +388,7 @@ func (t *Translator) mapSummaryMetrics( } // MapMetrics maps OTLP metrics into the DataDog format -func (t *Translator) MapMetrics(ctx context.Context, md pdata.Metrics, consumer Consumer) error { +func (t *Translator) MapMetrics(ctx context.Context, md pmetric.Metrics, consumer Consumer) error { rms := md.ResourceMetrics() for i := 0; i < rms.Len(); i++ { rm := rms.At(i) @@ -441,40 +441,40 @@ func (t *Translator) MapMetrics(ctx context.Context, md pdata.Metrics, consumer originID: attributes.OriginIDFromAttributes(rm.Resource().Attributes()), } switch md.DataType() { - case pdata.MetricDataTypeGauge: + case pmetric.MetricDataTypeGauge: t.mapNumberMetrics(ctx, consumer, baseDims, Gauge, md.Gauge().DataPoints()) - case pdata.MetricDataTypeSum: + case pmetric.MetricDataTypeSum: switch md.Sum().AggregationTemporality() { - case pdata.MetricAggregationTemporalityCumulative: + case pmetric.MetricAggregationTemporalityCumulative: if t.cfg.SendMonotonic && isCumulativeMonotonic(md) { t.mapNumberMonotonicMetrics(ctx, consumer, baseDims, md.Sum().DataPoints()) } else { t.mapNumberMetrics(ctx, consumer, baseDims, Gauge, md.Sum().DataPoints()) } - case pdata.MetricAggregationTemporalityDelta: + case pmetric.MetricAggregationTemporalityDelta: t.mapNumberMetrics(ctx, consumer, baseDims, Count, md.Sum().DataPoints()) - default: // pdata.AggregationTemporalityUnspecified or any other not supported type + default: // pmetric.MetricAggregationTemporalityUnspecified or any other not supported type t.logger.Debug("Unknown or unsupported aggregation temporality", zap.String(metricName, md.Name()), zap.Any("aggregation temporality", md.Sum().AggregationTemporality()), ) continue } - case pdata.MetricDataTypeHistogram: + case pmetric.MetricDataTypeHistogram: switch md.Histogram().AggregationTemporality() { - case pdata.MetricAggregationTemporalityCumulative, pdata.MetricAggregationTemporalityDelta: - delta := md.Histogram().AggregationTemporality() == pdata.MetricAggregationTemporalityDelta + case pmetric.MetricAggregationTemporalityCumulative, pmetric.MetricAggregationTemporalityDelta: + delta := md.Histogram().AggregationTemporality() == pmetric.MetricAggregationTemporalityDelta t.mapHistogramMetrics(ctx, consumer, baseDims, md.Histogram().DataPoints(), delta) - default: // pdata.AggregationTemporalityUnspecified or any other not supported type + default: // pmetric.MetricAggregationTemporalityUnspecified or any other not supported type t.logger.Debug("Unknown or unsupported aggregation temporality", zap.String("metric name", md.Name()), zap.Any("aggregation temporality", md.Histogram().AggregationTemporality()), ) continue } - case pdata.MetricDataTypeSummary: + case pmetric.MetricDataTypeSummary: t.mapSummaryMetrics(ctx, consumer, baseDims, md.Summary().DataPoints()) - default: // pdata.MetricDataTypeNone or any other not supported type + default: // pmetric.MetricDataTypeNone or any other not supported type t.logger.Debug("Unknown or unsupported metric type", zap.String(metricName, md.Name()), zap.Any("data type", md.DataType())) continue } 
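The metrics translator changes above illustrate the package split: pmetric now owns the metric model (Metrics, Metric, data point slices), while pcommon owns shared types such as Timestamp and Map. Below is a minimal, illustrative sketch, assuming only the APIs already exercised in this diff, of building a cumulative monotonic sum and routing it with the same DataType / AggregationTemporality switch that MapMetrics uses; it is not part of the exporter code.

package main

import (
	"fmt"
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	md := pmetric.NewMetrics()
	met := md.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics().AppendEmpty()

	// A cumulative monotonic sum: the case isCumulativeMonotonic reports true for.
	met.SetName("system.network.packets")
	met.SetDataType(pmetric.MetricDataTypeSum)
	met.Sum().SetIsMonotonic(true)
	met.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)

	// Data points keep their pdata-style setters; only the owning package changed.
	dp := met.Sum().DataPoints().AppendEmpty()
	dp.SetTimestamp(pcommon.NewTimestampFromTime(time.Now()))
	dp.SetIntVal(17)

	// The same kind of switch MapMetrics performs when choosing a mapping.
	switch met.DataType() {
	case pmetric.MetricDataTypeGauge:
		fmt.Println("mapped as gauge")
	case pmetric.MetricDataTypeSum:
		cumulative := met.Sum().AggregationTemporality() == pmetric.MetricAggregationTemporalityCumulative
		fmt.Println("mapped as sum, cumulative:", cumulative, "monotonic:", met.Sum().IsMonotonic())
	default:
		fmt.Println("unsupported data type")
	}
}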
diff --git a/exporter/datadogexporter/internal/model/translator/metrics_translator_test.go b/exporter/datadogexporter/internal/model/translator/metrics_translator_test.go index 3da36438364c..2c46001a8934 100644 --- a/exporter/datadogexporter/internal/model/translator/metrics_translator_test.go +++ b/exporter/datadogexporter/internal/model/translator/metrics_translator_test.go @@ -26,8 +26,9 @@ import ( gocache "github.com/patrickmn/go-cache" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" "go.uber.org/zap/zapcore" "go.uber.org/zap/zaptest/observer" @@ -42,48 +43,48 @@ func TestIsCumulativeMonotonic(t *testing.T) { // If the receiver changes these examples should be added here too { // Sum: Cumulative but not monotonic - metric := pdata.NewMetric() + metric := pmetric.NewMetric() metric.SetName("system.filesystem.usage") metric.SetDescription("Filesystem bytes used.") metric.SetUnit("bytes") - metric.SetDataType(pdata.MetricDataTypeSum) + metric.SetDataType(pmetric.MetricDataTypeSum) sum := metric.Sum() sum.SetIsMonotonic(false) - sum.SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + sum.SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) assert.False(t, isCumulativeMonotonic(metric)) } { // Sum: Cumulative and monotonic - metric := pdata.NewMetric() + metric := pmetric.NewMetric() metric.SetName("system.network.packets") metric.SetDescription("The number of packets transferred.") metric.SetUnit("1") - metric.SetDataType(pdata.MetricDataTypeSum) + metric.SetDataType(pmetric.MetricDataTypeSum) sum := metric.Sum() sum.SetIsMonotonic(true) - sum.SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + sum.SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) assert.True(t, isCumulativeMonotonic(metric)) } { // DoubleSumL Cumulative and monotonic - metric := pdata.NewMetric() + metric := pmetric.NewMetric() metric.SetName("metric.example") - metric.SetDataType(pdata.MetricDataTypeSum) + metric.SetDataType(pmetric.MetricDataTypeSum) sum := metric.Sum() sum.SetIsMonotonic(true) - sum.SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + sum.SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) assert.True(t, isCumulativeMonotonic(metric)) } { // Not IntSum - metric := pdata.NewMetric() + metric := pmetric.NewMetric() metric.SetName("system.cpu.load_average.1m") metric.SetDescription("Average CPU Load over 1 minute.") metric.SetUnit("1") - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) assert.False(t, isCumulativeMonotonic(metric)) } @@ -170,8 +171,8 @@ func newSketch(dims *Dimensions, ts uint64, s summary.Summary) sketch { } func TestMapIntMetrics(t *testing.T) { - ts := pdata.NewTimestampFromTime(time.Now()) - slice := pdata.NewNumberDataPointSlice() + ts := pcommon.NewTimestampFromTime(time.Now()) + slice := pmetric.NewNumberDataPointSlice() point := slice.AppendEmpty() point.SetIntVal(17) point.SetTimestamp(ts) @@ -205,8 +206,8 @@ func TestMapIntMetrics(t *testing.T) { } func TestMapDoubleMetrics(t *testing.T) { - ts := pdata.NewTimestampFromTime(time.Now()) - slice := pdata.NewNumberDataPointSlice() + ts := pcommon.NewTimestampFromTime(time.Now()) + 
slice := pmetric.NewNumberDataPointSlice() point := slice.AppendEmpty() point.SetDoubleVal(math.Pi) point.SetTimestamp(ts) @@ -239,8 +240,8 @@ func TestMapDoubleMetrics(t *testing.T) { ) } -func seconds(i int) pdata.Timestamp { - return pdata.NewTimestampFromTime(time.Unix(int64(i), 0)) +func seconds(i int) pcommon.Timestamp { + return pcommon.NewTimestampFromTime(time.Unix(int64(i), 0)) } var exampleDims = newDims("metric.example") @@ -255,7 +256,7 @@ func TestMapIntMonotonicMetrics(t *testing.T) { } //Map to OpenTelemetry format - slice := pdata.NewNumberDataPointSlice() + slice := pmetric.NewNumberDataPointSlice() slice.EnsureCapacity(len(cumulative)) for i, val := range cumulative { point := slice.AppendEmpty() @@ -278,7 +279,7 @@ func TestMapIntMonotonicMetrics(t *testing.T) { } func TestMapIntMonotonicDifferentDimensions(t *testing.T) { - slice := pdata.NewNumberDataPointSlice() + slice := pmetric.NewNumberDataPointSlice() // No tags point := slice.AppendEmpty() @@ -325,7 +326,7 @@ func TestMapIntMonotonicDifferentDimensions(t *testing.T) { func TestMapIntMonotonicWithReboot(t *testing.T) { values := []int64{0, 30, 0, 20} - slice := pdata.NewNumberDataPointSlice() + slice := pmetric.NewNumberDataPointSlice() slice.EnsureCapacity(len(values)) for i, val := range values { @@ -351,7 +352,7 @@ func TestMapIntMonotonicOutOfOrder(t *testing.T) { stamps := []int{1, 0, 2, 3} values := []int64{0, 1, 2, 3} - slice := pdata.NewNumberDataPointSlice() + slice := pmetric.NewNumberDataPointSlice() slice.EnsureCapacity(len(values)) for i, val := range values { @@ -382,7 +383,7 @@ func TestMapDoubleMonotonicMetrics(t *testing.T) { } //Map to OpenTelemetry format - slice := pdata.NewNumberDataPointSlice() + slice := pmetric.NewNumberDataPointSlice() slice.EnsureCapacity(len(cumulative)) for i, val := range cumulative { point := slice.AppendEmpty() @@ -405,7 +406,7 @@ func TestMapDoubleMonotonicMetrics(t *testing.T) { } func TestMapDoubleMonotonicDifferentDimensions(t *testing.T) { - slice := pdata.NewNumberDataPointSlice() + slice := pmetric.NewNumberDataPointSlice() // No tags point := slice.AppendEmpty() @@ -452,7 +453,7 @@ func TestMapDoubleMonotonicDifferentDimensions(t *testing.T) { func TestMapDoubleMonotonicWithReboot(t *testing.T) { values := []float64{0, 30, 0, 20} - slice := pdata.NewNumberDataPointSlice() + slice := pmetric.NewNumberDataPointSlice() slice.EnsureCapacity(len(values)) for i, val := range values { @@ -478,7 +479,7 @@ func TestMapDoubleMonotonicOutOfOrder(t *testing.T) { stamps := []int{1, 0, 2, 3} values := []float64{0, 1, 2, 3} - slice := pdata.NewNumberDataPointSlice() + slice := pmetric.NewNumberDataPointSlice() slice.EnsureCapacity(len(values)) for i, val := range values { @@ -528,8 +529,8 @@ func dimsWithBucket(dims *Dimensions, lowerBound string, upperBound string) *Dim } func TestMapDeltaHistogramMetrics(t *testing.T) { - ts := pdata.NewTimestampFromTime(time.Now()) - slice := pdata.NewHistogramDataPointSlice() + ts := pcommon.NewTimestampFromTime(time.Now()) + slice := pmetric.NewHistogramDataPointSlice() point := slice.AppendEmpty() point.SetCount(20) point.SetSum(math.Pi) @@ -686,7 +687,7 @@ func TestMapDeltaHistogramMetrics(t *testing.T) { } func TestMapCumulativeHistogramMetrics(t *testing.T) { - slice := pdata.NewHistogramDataPointSlice() + slice := pmetric.NewHistogramDataPointSlice() point := slice.AppendEmpty() point.SetCount(20) point.SetSum(math.Pi) @@ -790,7 +791,7 @@ func TestLegacyBucketsTags(t *testing.T) { tags := make([]string, 0, 10) - pointOne := 
pdata.NewHistogramDataPoint() + pointOne := pmetric.NewHistogramDataPoint() pointOne.SetBucketCounts([]uint64{2, 18}) pointOne.SetExplicitBounds([]float64{0}) pointOne.SetTimestamp(seconds(0)) @@ -799,7 +800,7 @@ func TestLegacyBucketsTags(t *testing.T) { tr.getLegacyBuckets(ctx, consumer, dims, pointOne, true) seriesOne := consumer.metrics - pointTwo := pdata.NewHistogramDataPoint() + pointTwo := pmetric.NewHistogramDataPoint() pointTwo.SetBucketCounts([]uint64{2, 18}) pointTwo.SetExplicitBounds([]float64{1}) pointTwo.SetTimestamp(seconds(0)) @@ -836,8 +837,8 @@ func TestFormatFloat(t *testing.T) { } } -func exampleSummaryDataPointSlice(ts pdata.Timestamp, sum float64, count uint64) pdata.SummaryDataPointSlice { - slice := pdata.NewSummaryDataPointSlice() +func exampleSummaryDataPointSlice(ts pcommon.Timestamp, sum float64, count uint64) pmetric.SummaryDataPointSlice { + slice := pmetric.NewSummaryDataPointSlice() point := slice.AppendEmpty() point.SetCount(count) point.SetSum(sum) @@ -863,7 +864,7 @@ func exampleSummaryDataPointSlice(ts pdata.Timestamp, sum float64, count uint64) } func TestMapSummaryMetrics(t *testing.T) { - ts := pdata.NewTimestampFromTime(time.Now()) + ts := pcommon.NewTimestampFromTime(time.Now()) slice := exampleSummaryDataPointSlice(ts, 10_001, 101) newTranslator := func(tags []string, quantiles bool) *Translator { @@ -941,8 +942,8 @@ const ( testHostname = "res-hostname" ) -func createTestMetrics(additionalAttributes map[string]string, name, version string) pdata.Metrics { - md := pdata.NewMetrics() +func createTestMetrics(additionalAttributes map[string]string, name, version string) pmetric.Metrics { + md := pmetric.NewMetrics() rms := md.ResourceMetrics() rm := rms.AppendEmpty() @@ -962,7 +963,7 @@ func createTestMetrics(additionalAttributes map[string]string, name, version str // IntGauge met := metricsArray.AppendEmpty() met.SetName("int.gauge") - met.SetDataType(pdata.MetricDataTypeGauge) + met.SetDataType(pmetric.MetricDataTypeGauge) dpsInt := met.Gauge().DataPoints() dpInt := dpsInt.AppendEmpty() dpInt.SetTimestamp(seconds(0)) @@ -971,7 +972,7 @@ func createTestMetrics(additionalAttributes map[string]string, name, version str // DoubleGauge met = metricsArray.AppendEmpty() met.SetName("double.gauge") - met.SetDataType(pdata.MetricDataTypeGauge) + met.SetDataType(pmetric.MetricDataTypeGauge) dpsDouble := met.Gauge().DataPoints() dpDouble := dpsDouble.AppendEmpty() dpDouble.SetTimestamp(seconds(0)) @@ -980,14 +981,14 @@ func createTestMetrics(additionalAttributes map[string]string, name, version str // aggregation unspecified sum met = metricsArray.AppendEmpty() met.SetName("unspecified.sum") - met.SetDataType(pdata.MetricDataTypeSum) - met.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityUnspecified) + met.SetDataType(pmetric.MetricDataTypeSum) + met.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityUnspecified) // Int Sum (delta) met = metricsArray.AppendEmpty() met.SetName("int.delta.sum") - met.SetDataType(pdata.MetricDataTypeSum) - met.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityDelta) + met.SetDataType(pmetric.MetricDataTypeSum) + met.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityDelta) dpsInt = met.Sum().DataPoints() dpInt = dpsInt.AppendEmpty() dpInt.SetTimestamp(seconds(0)) @@ -996,8 +997,8 @@ func createTestMetrics(additionalAttributes map[string]string, name, version str // Double Sum (delta) met = metricsArray.AppendEmpty() met.SetName("double.delta.sum") - 
met.SetDataType(pdata.MetricDataTypeSum) - met.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityDelta) + met.SetDataType(pmetric.MetricDataTypeSum) + met.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityDelta) dpsDouble = met.Sum().DataPoints() dpDouble = dpsDouble.AppendEmpty() dpDouble.SetTimestamp(seconds(0)) @@ -1006,8 +1007,8 @@ func createTestMetrics(additionalAttributes map[string]string, name, version str // Int Sum (delta monotonic) met = metricsArray.AppendEmpty() met.SetName("int.delta.monotonic.sum") - met.SetDataType(pdata.MetricDataTypeSum) - met.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityDelta) + met.SetDataType(pmetric.MetricDataTypeSum) + met.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityDelta) dpsInt = met.Sum().DataPoints() dpInt = dpsInt.AppendEmpty() dpInt.SetTimestamp(seconds(0)) @@ -1016,8 +1017,8 @@ func createTestMetrics(additionalAttributes map[string]string, name, version str // Double Sum (delta monotonic) met = metricsArray.AppendEmpty() met.SetName("double.delta.monotonic.sum") - met.SetDataType(pdata.MetricDataTypeSum) - met.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityDelta) + met.SetDataType(pmetric.MetricDataTypeSum) + met.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityDelta) dpsDouble = met.Sum().DataPoints() dpDouble = dpsDouble.AppendEmpty() dpDouble.SetTimestamp(seconds(0)) @@ -1026,14 +1027,14 @@ func createTestMetrics(additionalAttributes map[string]string, name, version str // aggregation unspecified histogram met = metricsArray.AppendEmpty() met.SetName("unspecified.histogram") - met.SetDataType(pdata.MetricDataTypeHistogram) - met.Histogram().SetAggregationTemporality(pdata.MetricAggregationTemporalityUnspecified) + met.SetDataType(pmetric.MetricDataTypeHistogram) + met.Histogram().SetAggregationTemporality(pmetric.MetricAggregationTemporalityUnspecified) // Histogram (delta) met = metricsArray.AppendEmpty() met.SetName("double.histogram") - met.SetDataType(pdata.MetricDataTypeHistogram) - met.Histogram().SetAggregationTemporality(pdata.MetricAggregationTemporalityDelta) + met.SetDataType(pmetric.MetricDataTypeHistogram) + met.Histogram().SetAggregationTemporality(pmetric.MetricAggregationTemporalityDelta) dpsDoubleHist := met.Histogram().DataPoints() dpDoubleHist := dpsDoubleHist.AppendEmpty() dpDoubleHist.SetCount(20) @@ -1045,8 +1046,8 @@ func createTestMetrics(additionalAttributes map[string]string, name, version str // Int Sum (cumulative) met = metricsArray.AppendEmpty() met.SetName("int.cumulative.sum") - met.SetDataType(pdata.MetricDataTypeSum) - met.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + met.SetDataType(pmetric.MetricDataTypeSum) + met.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) dpsInt = met.Sum().DataPoints() dpsInt.EnsureCapacity(2) dpInt = dpsInt.AppendEmpty() @@ -1056,8 +1057,8 @@ func createTestMetrics(additionalAttributes map[string]string, name, version str // Double Sum (cumulative) met = metricsArray.AppendEmpty() met.SetName("double.cumulative.sum") - met.SetDataType(pdata.MetricDataTypeSum) - met.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + met.SetDataType(pmetric.MetricDataTypeSum) + met.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) dpsDouble = met.Sum().DataPoints() dpsDouble.EnsureCapacity(2) dpDouble = dpsDouble.AppendEmpty() @@ -1067,8 +1068,8 @@ func 
createTestMetrics(additionalAttributes map[string]string, name, version str // Int Sum (cumulative monotonic) met = metricsArray.AppendEmpty() met.SetName("int.cumulative.monotonic.sum") - met.SetDataType(pdata.MetricDataTypeSum) - met.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + met.SetDataType(pmetric.MetricDataTypeSum) + met.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) met.Sum().SetIsMonotonic(true) dpsInt = met.Sum().DataPoints() dpsInt.EnsureCapacity(2) @@ -1082,8 +1083,8 @@ func createTestMetrics(additionalAttributes map[string]string, name, version str // Double Sum (cumulative monotonic) met = metricsArray.AppendEmpty() met.SetName("double.cumulative.monotonic.sum") - met.SetDataType(pdata.MetricDataTypeSum) - met.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + met.SetDataType(pmetric.MetricDataTypeSum) + met.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) met.Sum().SetIsMonotonic(true) dpsDouble = met.Sum().DataPoints() dpsDouble.EnsureCapacity(2) @@ -1097,13 +1098,13 @@ func createTestMetrics(additionalAttributes map[string]string, name, version str // Summary met = metricsArray.AppendEmpty() met.SetName("summary") - met.SetDataType(pdata.MetricDataTypeSummary) + met.SetDataType(pmetric.MetricDataTypeSummary) slice := exampleSummaryDataPointSlice(seconds(0), 1, 1) slice.CopyTo(met.Summary().DataPoints()) met = metricsArray.AppendEmpty() met.SetName("summary") - met.SetDataType(pdata.MetricDataTypeSummary) + met.SetDataType(pmetric.MetricDataTypeSummary) slice = exampleSummaryDataPointSlice(seconds(2), 10_001, 101) slice.CopyTo(met.Summary().DataPoints()) return md @@ -1311,8 +1312,8 @@ func TestMapMetrics(t *testing.T) { } } -func createNaNMetrics() pdata.Metrics { - md := pdata.NewMetrics() +func createNaNMetrics() pmetric.Metrics { + md := pmetric.NewMetrics() rms := md.ResourceMetrics() rm := rms.AppendEmpty() @@ -1325,7 +1326,7 @@ func createNaNMetrics() pdata.Metrics { // DoubleGauge met := metricsArray.AppendEmpty() met.SetName("nan.gauge") - met.SetDataType(pdata.MetricDataTypeGauge) + met.SetDataType(pmetric.MetricDataTypeGauge) dpsDouble := met.Gauge().DataPoints() dpDouble := dpsDouble.AppendEmpty() dpDouble.SetTimestamp(seconds(0)) @@ -1334,8 +1335,8 @@ func createNaNMetrics() pdata.Metrics { // Double Sum (delta) met = metricsArray.AppendEmpty() met.SetName("nan.delta.sum") - met.SetDataType(pdata.MetricDataTypeSum) - met.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityDelta) + met.SetDataType(pmetric.MetricDataTypeSum) + met.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityDelta) dpsDouble = met.Sum().DataPoints() dpDouble = dpsDouble.AppendEmpty() dpDouble.SetTimestamp(seconds(0)) @@ -1344,8 +1345,8 @@ func createNaNMetrics() pdata.Metrics { // Double Sum (delta monotonic) met = metricsArray.AppendEmpty() met.SetName("nan.delta.monotonic.sum") - met.SetDataType(pdata.MetricDataTypeSum) - met.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityDelta) + met.SetDataType(pmetric.MetricDataTypeSum) + met.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityDelta) dpsDouble = met.Sum().DataPoints() dpDouble = dpsDouble.AppendEmpty() dpDouble.SetTimestamp(seconds(0)) @@ -1354,8 +1355,8 @@ func createNaNMetrics() pdata.Metrics { // Histogram met = metricsArray.AppendEmpty() met.SetName("nan.histogram") - met.SetDataType(pdata.MetricDataTypeHistogram) - 
met.Histogram().SetAggregationTemporality(pdata.MetricAggregationTemporalityDelta) + met.SetDataType(pmetric.MetricDataTypeHistogram) + met.Histogram().SetAggregationTemporality(pmetric.MetricAggregationTemporalityDelta) dpsDoubleHist := met.Histogram().DataPoints() dpDoubleHist := dpsDoubleHist.AppendEmpty() dpDoubleHist.SetCount(20) @@ -1367,8 +1368,8 @@ func createNaNMetrics() pdata.Metrics { // Double Sum (cumulative) met = metricsArray.AppendEmpty() met.SetName("nan.cumulative.sum") - met.SetDataType(pdata.MetricDataTypeSum) - met.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + met.SetDataType(pmetric.MetricDataTypeSum) + met.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) dpsDouble = met.Sum().DataPoints() dpsDouble.EnsureCapacity(2) dpDouble = dpsDouble.AppendEmpty() @@ -1378,8 +1379,8 @@ func createNaNMetrics() pdata.Metrics { // Double Sum (cumulative monotonic) met = metricsArray.AppendEmpty() met.SetName("nan.cumulative.monotonic.sum") - met.SetDataType(pdata.MetricDataTypeSum) - met.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + met.SetDataType(pmetric.MetricDataTypeSum) + met.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) met.Sum().SetIsMonotonic(true) dpsDouble = met.Sum().DataPoints() dpsDouble.EnsureCapacity(2) @@ -1390,13 +1391,13 @@ func createNaNMetrics() pdata.Metrics { // Summary met = metricsArray.AppendEmpty() met.SetName("nan.summary") - met.SetDataType(pdata.MetricDataTypeSummary) + met.SetDataType(pmetric.MetricDataTypeSummary) slice := exampleSummaryDataPointSlice(seconds(0), math.NaN(), 1) slice.CopyTo(met.Summary().DataPoints()) met = metricsArray.AppendEmpty() met.SetName("nan.summary") - met.SetDataType(pdata.MetricDataTypeSummary) + met.SetDataType(pmetric.MetricDataTypeSummary) slice = exampleSummaryDataPointSlice(seconds(2), 10_001, 101) slice.CopyTo(met.Summary().DataPoints()) return md diff --git a/exporter/datadogexporter/internal/model/translator/sketches_test.go b/exporter/datadogexporter/internal/model/translator/sketches_test.go index dcff60697da9..2511ae02a881 100644 --- a/exporter/datadogexporter/internal/model/translator/sketches_test.go +++ b/exporter/datadogexporter/internal/model/translator/sketches_test.go @@ -22,7 +22,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/quantile" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" ) @@ -43,19 +43,19 @@ func (c *sketchConsumer) ConsumeSketch( c.sk = sketch } -func newHistogramMetric(p pdata.HistogramDataPoint) pdata.Metrics { - md := pdata.NewMetrics() +func newHistogramMetric(p pmetric.HistogramDataPoint) pmetric.Metrics { + md := pmetric.NewMetrics() rms := md.ResourceMetrics() rm := rms.AppendEmpty() ilms := rm.ScopeMetrics() ilm := ilms.AppendEmpty() metricsArray := ilm.Metrics() m := metricsArray.AppendEmpty() - m.SetDataType(pdata.MetricDataTypeHistogram) + m.SetDataType(pmetric.MetricDataTypeHistogram) m.SetName("test") // Copy Histogram point - m.Histogram().SetAggregationTemporality(pdata.MetricAggregationTemporalityDelta) + m.Histogram().SetAggregationTemporality(pmetric.MetricAggregationTemporalityDelta) dps := m.Histogram().DataPoints() np := dps.AppendEmpty() np.SetCount(p.Count()) @@ -75,8 +75,8 @@ func TestHistogramSketches(t *testing.T) { // with support [0, N], generate an OTLP Histogram data point with N buckets, // (-inf, 0], (0, 1], 
..., (N-1, N], (N, inf) // which contains N*M uniform samples of the distribution. - fromCDF := func(cdf func(x float64) float64) pdata.Metrics { - p := pdata.NewHistogramDataPoint() + fromCDF := func(cdf func(x float64) float64) pmetric.Metrics { + p := pmetric.NewHistogramDataPoint() bounds := make([]float64, N+1) buckets := make([]uint64, N+2) buckets[0] = 0 @@ -182,7 +182,7 @@ func TestHistogramSketches(t *testing.T) { func TestExactSumCount(t *testing.T) { tests := []struct { name string - getHist func() pdata.Metrics + getHist func() pmetric.Metrics sum float64 count uint64 }{} @@ -191,22 +191,22 @@ func TestExactSumCount(t *testing.T) { tests = append(tests, struct { name string - getHist func() pdata.Metrics + getHist func() pmetric.Metrics sum float64 count uint64 }{ name: "Uniform distribution (delta)", - getHist: func() pdata.Metrics { - md := pdata.NewMetrics() + getHist: func() pmetric.Metrics { + md := pmetric.NewMetrics() rms := md.ResourceMetrics() rm := rms.AppendEmpty() ilms := rm.ScopeMetrics() ilm := ilms.AppendEmpty() metricsArray := ilm.Metrics() m := metricsArray.AppendEmpty() - m.SetDataType(pdata.MetricDataTypeHistogram) + m.SetDataType(pmetric.MetricDataTypeHistogram) m.SetName("test") - m.Histogram().SetAggregationTemporality(pdata.MetricAggregationTemporalityDelta) + m.Histogram().SetAggregationTemporality(pmetric.MetricAggregationTemporalityDelta) dp := m.Histogram().DataPoints() p := dp.AppendEmpty() p.SetExplicitBounds([]float64{0, 5_000, 10_000, 15_000, 20_000}) @@ -222,22 +222,22 @@ func TestExactSumCount(t *testing.T) { struct { name string - getHist func() pdata.Metrics + getHist func() pmetric.Metrics sum float64 count uint64 }{ name: "Uniform distribution (cumulative)", - getHist: func() pdata.Metrics { - md := pdata.NewMetrics() + getHist: func() pmetric.Metrics { + md := pmetric.NewMetrics() rms := md.ResourceMetrics() rm := rms.AppendEmpty() ilms := rm.ScopeMetrics() ilm := ilms.AppendEmpty() metricsArray := ilm.Metrics() m := metricsArray.AppendEmpty() - m.SetDataType(pdata.MetricDataTypeHistogram) + m.SetDataType(pmetric.MetricDataTypeHistogram) m.SetName("test") - m.Histogram().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.Histogram().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) dp := m.Histogram().DataPoints() // Points from contrib issue 6129: 0, 5_000, 10_000, 15_000, 20_000 repeated. 
bounds := []float64{0, 5_000, 10_000, 15_000, 20_000} @@ -261,23 +261,23 @@ func TestExactSumCount(t *testing.T) { val := val tests = append(tests, struct { name string - getHist func() pdata.Metrics + getHist func() pmetric.Metrics sum float64 count uint64 }{ name: fmt.Sprintf("Issue 7065 (%d, %f)", pos, val), - getHist: func() pdata.Metrics { - md := pdata.NewMetrics() + getHist: func() pmetric.Metrics { + md := pmetric.NewMetrics() rms := md.ResourceMetrics() rm := rms.AppendEmpty() ilms := rm.ScopeMetrics() ilm := ilms.AppendEmpty() metricsArray := ilm.Metrics() m := metricsArray.AppendEmpty() - m.SetDataType(pdata.MetricDataTypeHistogram) + m.SetDataType(pmetric.MetricDataTypeHistogram) m.SetName("test") - m.Histogram().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.Histogram().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) bounds := []float64{1_000, 10_000, 100_000} dp := m.Histogram().DataPoints() @@ -319,12 +319,12 @@ func TestInfiniteBounds(t *testing.T) { tests := []struct { name string - getHist func() pdata.Metrics + getHist func() pmetric.Metrics }{ { name: "(-inf, inf): 100", - getHist: func() pdata.Metrics { - p := pdata.NewHistogramDataPoint() + getHist: func() pmetric.Metrics { + p := pmetric.NewHistogramDataPoint() p.SetExplicitBounds([]float64{}) p.SetBucketCounts([]uint64{100}) p.SetCount(100) @@ -334,8 +334,8 @@ func TestInfiniteBounds(t *testing.T) { }, { name: "(-inf, 0]: 100, (0, +inf]: 100", - getHist: func() pdata.Metrics { - p := pdata.NewHistogramDataPoint() + getHist: func() pmetric.Metrics { + p := pmetric.NewHistogramDataPoint() p.SetExplicitBounds([]float64{0}) p.SetBucketCounts([]uint64{100, 100}) p.SetCount(200) @@ -345,8 +345,8 @@ func TestInfiniteBounds(t *testing.T) { }, { name: "(-inf, -1]: 100, (-1, 1]: 10, (1, +inf]: 100", - getHist: func() pdata.Metrics { - p := pdata.NewHistogramDataPoint() + getHist: func() pmetric.Metrics { + p := pmetric.NewHistogramDataPoint() p.SetExplicitBounds([]float64{-1, 1}) p.SetBucketCounts([]uint64{100, 10, 100}) p.SetCount(210) diff --git a/exporter/datadogexporter/internal/testutils/test_utils.go b/exporter/datadogexporter/internal/testutils/test_utils.go index e4895e81d314..908f84680e58 100644 --- a/exporter/datadogexporter/internal/testutils/test_utils.go +++ b/exporter/datadogexporter/internal/testutils/test_utils.go @@ -20,7 +20,9 @@ import ( "net/http" "net/http/httptest" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" ) var ( @@ -85,30 +87,30 @@ func newMetadataEndpoint(c chan []byte) func(http.ResponseWriter, *http.Request) } } -func fillAttributeMap(attrs pdata.Map, mp map[string]string) { +func fillAttributeMap(attrs pcommon.Map, mp map[string]string) { attrs.Clear() attrs.EnsureCapacity(len(mp)) for k, v := range mp { - attrs.Insert(k, pdata.NewValueString(v)) + attrs.Insert(k, pcommon.NewValueString(v)) } } // NewAttributeMap creates a new attribute map (string only) // from a Go map -func NewAttributeMap(mp map[string]string) pdata.Map { - attrs := pdata.NewMap() +func NewAttributeMap(mp map[string]string) pcommon.Map { + attrs := pcommon.NewMap() fillAttributeMap(attrs, mp) return attrs } -func newMetricsWithAttributeMap(mp map[string]string) pdata.Metrics { - md := pdata.NewMetrics() +func newMetricsWithAttributeMap(mp map[string]string) pmetric.Metrics { + md := pmetric.NewMetrics() 
fillAttributeMap(md.ResourceMetrics().AppendEmpty().Resource().Attributes(), mp) return md } -func newTracesWithAttributeMap(mp map[string]string) pdata.Traces { - traces := pdata.NewTraces() +func newTracesWithAttributeMap(mp map[string]string) ptrace.Traces { + traces := ptrace.NewTraces() resourceSpans := traces.ResourceSpans() rs := resourceSpans.AppendEmpty() fillAttributeMap(rs.Resource().Attributes(), mp) diff --git a/exporter/datadogexporter/internal/utils/trace_helpers.go b/exporter/datadogexporter/internal/utils/trace_helpers.go index 91e279860ce3..32fddbb2e509 100644 --- a/exporter/datadogexporter/internal/utils/trace_helpers.go +++ b/exporter/datadogexporter/internal/utils/trace_helpers.go @@ -20,7 +20,7 @@ import ( "unicode/utf8" "github.com/DataDog/datadog-agent/pkg/trace/exportable/pb" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/ptrace" ) // constants for tags @@ -114,7 +114,7 @@ func NormalizeSpanName(tag string, isService bool) string { } // NormalizeSpanKind returns a span kind with the SPAN_KIND prefix trimmed off -func NormalizeSpanKind(kind pdata.SpanKind) string { +func NormalizeSpanKind(kind ptrace.SpanKind) string { return strings.TrimPrefix(kind.String(), "SPAN_KIND_") } diff --git a/exporter/datadogexporter/metrics_exporter.go b/exporter/datadogexporter/metrics_exporter.go index 5f04cf522950..69ac84b1586f 100644 --- a/exporter/datadogexporter/metrics_exporter.go +++ b/exporter/datadogexporter/metrics_exporter.go @@ -23,7 +23,8 @@ import ( "time" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/multierr" "go.uber.org/zap" "gopkg.in/zorkian/go-datadog-api.v2" @@ -153,17 +154,17 @@ func (exp *metricsExporter) pushSketches(ctx context.Context, sl sketches.Sketch return nil } -func (exp *metricsExporter) PushMetricsDataScrubbed(ctx context.Context, md pdata.Metrics) error { +func (exp *metricsExporter) PushMetricsDataScrubbed(ctx context.Context, md pmetric.Metrics) error { return exp.scrubber.Scrub(exp.PushMetricsData(ctx, md)) } -func (exp *metricsExporter) PushMetricsData(ctx context.Context, md pdata.Metrics) error { +func (exp *metricsExporter) PushMetricsData(ctx context.Context, md pmetric.Metrics) error { // Start host metadata with resource attributes from // the first payload. 
if exp.cfg.HostMetadata.Enabled { exp.onceMetadata.Do(func() { - attrs := pdata.NewMap() + attrs := pcommon.NewMap() if md.ResourceMetrics().Len() > 0 { attrs = md.ResourceMetrics().At(0).Resource().Attributes() } diff --git a/exporter/datadogexporter/traces_exporter.go b/exporter/datadogexporter/traces_exporter.go index 16d897c7787f..55c8f6df80e6 100644 --- a/exporter/datadogexporter/traces_exporter.go +++ b/exporter/datadogexporter/traces_exporter.go @@ -24,7 +24,8 @@ import ( "github.com/DataDog/datadog-agent/pkg/trace/exportable/pb" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap" "gopkg.in/zorkian/go-datadog-api.v2" @@ -102,7 +103,7 @@ func newTracesExporter(ctx context.Context, params component.ExporterCreateSetti // return nil // } -func (exp *traceExporter) pushTraceDataScrubbed(ctx context.Context, td pdata.Traces) error { +func (exp *traceExporter) pushTraceDataScrubbed(ctx context.Context, td ptrace.Traces) error { return exp.scrubber.Scrub(exp.pushTraceData(ctx, td)) } @@ -111,14 +112,14 @@ var _ consumer.ConsumeTracesFunc = (*traceExporter)(nil).pushTraceData func (exp *traceExporter) pushTraceData( ctx context.Context, - td pdata.Traces, + td ptrace.Traces, ) error { // Start host metadata with resource attributes from // the first payload. if exp.cfg.HostMetadata.Enabled { exp.onceMetadata.Do(func() { - attrs := pdata.NewMap() + attrs := pcommon.NewMap() if td.ResourceSpans().Len() > 0 { attrs = td.ResourceSpans().At(0).Resource().Attributes() } diff --git a/exporter/datadogexporter/traces_exporter_test.go b/exporter/datadogexporter/traces_exporter_test.go index 0a2acba161b0..b8051696f566 100644 --- a/exporter/datadogexporter/traces_exporter_test.go +++ b/exporter/datadogexporter/traces_exporter_test.go @@ -31,14 +31,15 @@ import ( "go.opentelemetry.io/collector/component/componenttest" otelconfig "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/config/confignet" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/config" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/metadata" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/internal/testutils" ) -func testTracesExporterHelper(td pdata.Traces, t *testing.T) []string { +func testTracesExporterHelper(td ptrace.Traces, t *testing.T) []string { metricsServer := testutils.DatadogServerMock() defer metricsServer.Close() @@ -221,12 +222,12 @@ func TestTraceAndStatsExporter(t *testing.T) { assert.Equal(t, "application/x-protobuf", got[0]) } -func simpleTraces() pdata.Traces { - return simpleTracesWithID(pdata.NewTraceID([16]byte{1, 2, 3, 4})) +func simpleTraces() ptrace.Traces { + return simpleTracesWithID(pcommon.NewTraceID([16]byte{1, 2, 3, 4})) } -func simpleTracesWithID(traceID pdata.TraceID) pdata.Traces { - traces := pdata.NewTraces() +func simpleTracesWithID(traceID pcommon.TraceID) ptrace.Traces { + traces := ptrace.NewTraces() traces.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty().Spans().AppendEmpty().SetTraceID(traceID) return traces } diff --git a/exporter/datadogexporter/translate_traces.go 
b/exporter/datadogexporter/translate_traces.go index 1e7d2d48931f..f272282b2172 100644 --- a/exporter/datadogexporter/translate_traces.go +++ b/exporter/datadogexporter/translate_traces.go @@ -24,8 +24,9 @@ import ( "github.com/DataDog/datadog-agent/pkg/trace/exportable/pb" "github.com/DataDog/datadog-agent/pkg/trace/exportable/traceutil" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext" "gopkg.in/zorkian/go-datadog-api.v2" @@ -70,7 +71,7 @@ const ( const AttributeExceptionEventName = "exception" // converts Traces into an array of datadog trace payloads grouped by env -func convertToDatadogTd(td pdata.Traces, fallbackHost string, cfg *config.Config, blk *denylister, buildInfo component.BuildInfo) ([]*pb.TracePayload, []datadog.Metric) { +func convertToDatadogTd(td ptrace.Traces, fallbackHost string, cfg *config.Config, blk *denylister, buildInfo component.BuildInfo) ([]*pb.TracePayload, []datadog.Metric) { // TODO: // do we apply other global tags, like version+service, to every span or only root spans of a service // should globalTags['service'] take precedence over a trace's resource.service.name? I don't believe so, need to confirm @@ -82,7 +83,7 @@ func convertToDatadogTd(td pdata.Traces, fallbackHost string, cfg *config.Config seenHosts := make(map[string]struct{}) seenTags := make(map[string]struct{}) var series []datadog.Metric - pushTime := pdata.NewTimestampFromTime(time.Now()) + pushTime := pcommon.NewTimestampFromTime(time.Now()) spanNameMap := cfg.Traces.SpanNameRemappings @@ -150,7 +151,7 @@ func aggregateTracePayloadsByEnv(tracePayloads []*pb.TracePayload) []*pb.TracePa } // converts a Trace's resource spans into a trace payload -func resourceSpansToDatadogSpans(rs pdata.ResourceSpans, hostname string, cfg *config.Config, blk *denylister, spanNameMap map[string]string) pb.TracePayload { +func resourceSpansToDatadogSpans(rs ptrace.ResourceSpans, hostname string, cfg *config.Config, blk *denylister, spanNameMap map[string]string) pb.TracePayload { // get env tag env := utils.NormalizeTag(cfg.Env) @@ -241,7 +242,7 @@ func resourceSpansToDatadogSpans(rs pdata.ResourceSpans, hostname string, cfg *c } // convertSpan takes an internal span representation and returns a Datadog span. 
-func spanToDatadogSpan(s pdata.Span, +func spanToDatadogSpan(s ptrace.Span, serviceName string, datadogTags map[string]string, cfg *config.Config, @@ -336,7 +337,7 @@ func spanToDatadogSpan(s pdata.Span, } func resourceToDatadogServiceNameAndAttributeMap( - resource pdata.Resource, + resource pcommon.Resource, ) (serviceName string, datadogTags map[string]string) { attrs := resource.Attributes() // predefine capacity where possible with extra for _dd.tags.container payload and duplicate env tag @@ -346,7 +347,7 @@ func resourceToDatadogServiceNameAndAttributeMap( return tracetranslator.ResourceNoServiceName, datadogTags } - attrs.Range(func(k string, v pdata.Value) bool { + attrs.Range(func(k string, v pcommon.Value) bool { datadogTags[k] = v.AsString() return true }) @@ -377,7 +378,7 @@ func extractDatadogServiceName(datadogTags map[string]string) string { return serviceName } -func extractInstrumentationLibraryTags(il pdata.InstrumentationScope, datadogTags map[string]string) { +func extractInstrumentationLibraryTags(il pcommon.InstrumentationScope, datadogTags map[string]string) { if ilName := il.Name(); ilName != "" { datadogTags[conventions.OtelLibraryName] = ilName } @@ -386,7 +387,7 @@ func extractInstrumentationLibraryTags(il pdata.InstrumentationScope, datadogTag } } -func aggregateSpanTags(span pdata.Span, datadogTags map[string]string) map[string]string { +func aggregateSpanTags(span ptrace.Span, datadogTags map[string]string) map[string]string { // predefine capacity as at most the size attributes and global tags // there may be overlap between the two. spanTags := make(map[string]string, span.Attributes().Len()+len(datadogTags)) @@ -395,7 +396,7 @@ func aggregateSpanTags(span pdata.Span, datadogTags map[string]string) map[strin spanTags[utils.NormalizeTag(key)] = val } - span.Attributes().Range(func(k string, v pdata.Value) bool { + span.Attributes().Range(func(k string, v pcommon.Value) bool { switch k { case keySamplingPriority: spanTags[k] = v.AsString() @@ -422,9 +423,9 @@ func aggregateSpanTags(span pdata.Span, datadogTags map[string]string) map[strin // in datadog currently are redis and memcached, so those are the only two db.system // attribute values we have to check to determine whether it's a db or cache span // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/database.md#semantic-conventions-for-database-client-calls -func inferDatadogType(kind pdata.SpanKind, datadogTags map[string]string) string { +func inferDatadogType(kind ptrace.SpanKind, datadogTags map[string]string) string { switch kind { - case pdata.SpanKindClient: + case ptrace.SpanKindClient: if dbSysOtlp, ok := datadogTags[conventions.AttributeDBSystem]; ok { if dbSysOtlp == kindRedis || dbSysOtlp == kindMemcached { return kindCache @@ -434,7 +435,7 @@ func inferDatadogType(kind pdata.SpanKind, datadogTags map[string]string) string } return kindHTTP - case pdata.SpanKindServer: + case ptrace.SpanKindServer: return kindWeb default: return kindCustom @@ -507,7 +508,7 @@ func decodeAPMId(id string) uint64 { return val } -func getDatadogSpanName(s pdata.Span, datadogTags map[string]string) string { +func getDatadogSpanName(s ptrace.Span, datadogTags map[string]string) string { // largely a port of logic here // https://github.com/open-telemetry/opentelemetry-python/blob/b2559409b2bf82e693f3e68ed890dd7fd1fa8eae/exporter/opentelemetry-exporter-datadog/src/opentelemetry/exporter/datadog/exporter.py#L213 // Get span name by using 
instrumentation library name and span kind while backing off to span.kind @@ -529,7 +530,7 @@ func getDatadogSpanName(s pdata.Span, datadogTags map[string]string) string { return utils.NormalizeSpanName(fmt.Sprintf("%s.%s", "opentelemetry", utils.NormalizeSpanKind(s.Kind())), false) } -func getDatadogResourceName(s pdata.Span, datadogTags map[string]string) string { +func getDatadogResourceName(s ptrace.Span, datadogTags map[string]string) string { // largely a port of logic here // https://github.com/open-telemetry/opentelemetry-python/blob/b2559409b2bf82e693f3e68ed890dd7fd1fa8eae/exporter/opentelemetry-exporter-datadog/src/opentelemetry/exporter/datadog/exporter.py#L229 // Get span resource name by checking for existence http.method + http.route 'GET /api' @@ -568,14 +569,14 @@ func getDatadogResourceName(s pdata.Span, datadogTags map[string]string) string return s.Name() } -func getSpanErrorAndSetTags(s pdata.Span, tags map[string]string) int32 { +func getSpanErrorAndSetTags(s ptrace.Span, tags map[string]string) int32 { var isError int32 // Set Span Status and any response or error details status := s.Status() switch status.Code() { - case pdata.StatusCodeOk: + case ptrace.StatusCodeOk: isError = okCode - case pdata.StatusCodeError: + case ptrace.StatusCodeError: isError = errorCode default: isError = okCode @@ -616,7 +617,7 @@ func getSpanErrorAndSetTags(s pdata.Span, tags map[string]string) int32 { if httpStatusCode >= 500 { isError = errorCode // for 400 type, mark as error if it is an http client - } else if s.Kind() == pdata.SpanKindClient && httpStatusCode >= 400 { + } else if s.Kind() == ptrace.SpanKindClient && httpStatusCode >= 400 { isError = errorCode } } @@ -638,7 +639,7 @@ func getSpanErrorAndSetTags(s pdata.Span, tags map[string]string) int32 { // in the case that these events differ. // // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/exceptions.md#attributes -func extractErrorTagsFromEvents(s pdata.Span, tags map[string]string) { +func extractErrorTagsFromEvents(s ptrace.Span, tags map[string]string) { evts := s.Events() for i := evts.Len() - 1; i >= 0; i-- { evt := evts.At(i) @@ -668,7 +669,7 @@ func extractErrorTagsFromEvents(s pdata.Span, tags map[string]string) { // // TODO: Expose configuration option for collecting Span Events as Logs within Datadog // and add forwarding to Logs API intake. 
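The translator hunks above replace the monolithic pdata import with the split packages used by collector v0.48.1-dev: span and span-kind types move to go.opentelemetry.io/collector/pdata/ptrace, while resources, values, and maps move to pdata/pcommon. A minimal sketch of the new attribute-iteration pattern under that API (flattenAttributes is an illustrative name, not a function in this exporter):

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/ptrace"
)

// flattenAttributes mirrors the Range pattern used by the translator above:
// the callback now receives a pcommon.Value, and every value is stringified
// with AsString before being stored as a Datadog-style tag.
func flattenAttributes(span ptrace.Span) map[string]string {
	tags := make(map[string]string, span.Attributes().Len())
	span.Attributes().Range(func(k string, v pcommon.Value) bool {
		tags[k] = v.AsString()
		return true // keep iterating over the remaining attributes
	})
	return tags
}

func main() {
	span := ptrace.NewSpan()
	span.SetKind(ptrace.SpanKindServer)
	span.Attributes().InsertString("http.method", "GET")
	fmt.Println(flattenAttributes(span)) // map[http.method:GET]
}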
-func eventsToString(evts pdata.SpanEventSlice) string { +func eventsToString(evts ptrace.SpanEventSlice) string { eventArray := make([]map[string]interface{}, 0, evts.Len()) for i := 0; i < evts.Len(); i++ { spanEvent := evts.At(i) diff --git a/exporter/datadogexporter/translate_traces_test.go b/exporter/datadogexporter/translate_traces_test.go index daca29c05686..e84134f6d99a 100644 --- a/exporter/datadogexporter/translate_traces_test.go +++ b/exporter/datadogexporter/translate_traces_test.go @@ -29,8 +29,9 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/ext" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter/config" @@ -49,36 +50,36 @@ func RandStringBytes(n int) string { return string(b) } -func NewResourceSpansData(mockTraceID [16]byte, mockSpanID [8]byte, mockParentSpanID [8]byte, statusCode pdata.StatusCode, resourceEnvAndService bool, endTime time.Time) pdata.ResourceSpans { +func NewResourceSpansData(mockTraceID [16]byte, mockSpanID [8]byte, mockParentSpanID [8]byte, statusCode ptrace.StatusCode, resourceEnvAndService bool, endTime time.Time) ptrace.ResourceSpans { // The goal of this test is to ensure that each span in - // pdata.ResourceSpans is transformed to its *trace.SpanData correctly! + // ptrace.ResourceSpans is transformed to its *trace.SpanData correctly! - pdataEndTime := pdata.NewTimestampFromTime(endTime) + pdataEndTime := pcommon.NewTimestampFromTime(endTime) startTime := endTime.Add(-90 * time.Second) - pdataStartTime := pdata.NewTimestampFromTime(startTime) + pdataStartTime := pcommon.NewTimestampFromTime(startTime) - rs := pdata.NewResourceSpans() + rs := ptrace.NewResourceSpans() ilss := rs.ScopeSpans() ils := ilss.AppendEmpty() ils.Scope().SetName("test_il_name") ils.Scope().SetVersion("test_il_version") span := ils.Spans().AppendEmpty() - traceID := pdata.NewTraceID(mockTraceID) - spanID := pdata.NewSpanID(mockSpanID) - parentSpanID := pdata.NewSpanID(mockParentSpanID) + traceID := pcommon.NewTraceID(mockTraceID) + spanID := pcommon.NewSpanID(mockSpanID) + parentSpanID := pcommon.NewSpanID(mockParentSpanID) span.SetTraceID(traceID) span.SetSpanID(spanID) span.SetParentSpanID(parentSpanID) span.SetName("End-To-End Here") - span.SetKind(pdata.SpanKindServer) + span.SetKind(ptrace.SpanKindServer) span.SetStartTimestamp(pdataStartTime) span.SetEndTimestamp(pdataEndTime) span.SetTraceState("tracestatekey=tracestatevalue") status := span.Status() - if statusCode == pdata.StatusCodeError { - status.SetCode(pdata.StatusCodeError) + if statusCode == ptrace.StatusCodeError { + status.SetCode(ptrace.StatusCodeError) status.SetMessage("This is not a drill!") } else { status.SetCode(statusCode) @@ -102,11 +103,11 @@ func NewResourceSpansData(mockTraceID [16]byte, mockSpanID [8]byte, mockParentSp "agent": "ocagent", } - if statusCode == pdata.StatusCodeError { + if statusCode == ptrace.StatusCodeError { attribs["http.status_code"] = "501" } - pdata.NewMapFromRaw(attribs).CopyTo(span.Attributes()) + pcommon.NewMapFromRaw(attribs).CopyTo(span.Attributes()) resource := rs.Resource() @@ -126,7 +127,7 @@ func NewResourceSpansData(mockTraceID [16]byte, mockSpanID [8]byte, mockParentSp } func 
TestConvertToDatadogTd(t *testing.T) { - traces := pdata.NewTraces() + traces := ptrace.NewTraces() traces.ResourceSpans().AppendEmpty() denylister := newDenylister([]string{}) buildInfo := component.BuildInfo{ @@ -140,7 +141,7 @@ func TestConvertToDatadogTd(t *testing.T) { } func TestConvertToDatadogTdNoResourceSpans(t *testing.T) { - traces := pdata.NewTraces() + traces := ptrace.NewTraces() denylister := newDenylister([]string{}) buildInfo := component.BuildInfo{ Version: "1.0", @@ -153,21 +154,21 @@ func TestConvertToDatadogTdNoResourceSpans(t *testing.T) { } func TestRunningTraces(t *testing.T) { - td := pdata.NewTraces() + td := ptrace.NewTraces() rts := td.ResourceSpans() rt := rts.AppendEmpty() resAttrs := rt.Resource().Attributes() - resAttrs.Insert(attributes.AttributeDatadogHostname, pdata.NewValueString("resource-hostname-1")) + resAttrs.Insert(attributes.AttributeDatadogHostname, pcommon.NewValueString("resource-hostname-1")) rt = rts.AppendEmpty() resAttrs = rt.Resource().Attributes() - resAttrs.Insert(attributes.AttributeDatadogHostname, pdata.NewValueString("resource-hostname-1")) + resAttrs.Insert(attributes.AttributeDatadogHostname, pcommon.NewValueString("resource-hostname-1")) rt = rts.AppendEmpty() resAttrs = rt.Resource().Attributes() - resAttrs.Insert(attributes.AttributeDatadogHostname, pdata.NewValueString("resource-hostname-2")) + resAttrs.Insert(attributes.AttributeDatadogHostname, pcommon.NewValueString("resource-hostname-2")) rts.AppendEmpty() @@ -191,7 +192,7 @@ func TestRunningTraces(t *testing.T) { } func TestRunningTracesARN(t *testing.T) { - td := pdata.NewTraces() + td := ptrace.NewTraces() rts := td.ResourceSpans() rm := rts.AppendEmpty() @@ -238,7 +239,7 @@ func TestObfuscation(t *testing.T) { Version: "1.0", } - traces := pdata.NewTraces() + traces := ptrace.NewTraces() rs := traces.ResourceSpans().AppendEmpty() resource := rs.Resource() resource.Attributes().InsertString("service.name", "sure") @@ -276,7 +277,7 @@ func TestBasicTracesTranslation(t *testing.T) { // create mock resource span data // set shouldError and resourceServiceandEnv to false to test defaut behavior - rs := NewResourceSpansData(mockTraceID, mockSpanID, mockParentSpanID, pdata.StatusCodeUnset, false, mockEndTime) + rs := NewResourceSpansData(mockTraceID, mockSpanID, mockParentSpanID, ptrace.StatusCodeUnset, false, mockEndTime) // translate mocks to datadog traces datadogPayload := resourceSpansToDatadogSpans(rs, hostname, &config.Config{}, denylister, map[string]string{}) @@ -302,13 +303,13 @@ func TestBasicTracesTranslation(t *testing.T) { assert.Equal(t, decodeAPMSpanID(mockParentSpanID), datadogPayload.Traces[0].Spans[0].ParentID) // ensure original TraceID is preserved - assert.Equal(t, pdata.NewTraceID(mockTraceID).HexString(), datadogPayload.Traces[0].Spans[0].Meta["otel.trace_id"]) + assert.Equal(t, pcommon.NewTraceID(mockTraceID).HexString(), datadogPayload.Traces[0].Spans[0].Meta["otel.trace_id"]) // ensure that span.resource defaults to otlp span.name assert.Equal(t, "End-To-End Here", datadogPayload.Traces[0].Spans[0].Resource) // ensure that span.name defaults to string representing instrumentation library if present - assert.Equal(t, strings.ToLower(fmt.Sprintf("%s.%s", datadogPayload.Traces[0].Spans[0].Meta[conventions.OtelLibraryName], strings.TrimPrefix(pdata.SpanKindServer.String(), "SPAN_KIND_"))), datadogPayload.Traces[0].Spans[0].Name) + assert.Equal(t, strings.ToLower(fmt.Sprintf("%s.%s", datadogPayload.Traces[0].Spans[0].Meta[conventions.OtelLibraryName], 
strings.TrimPrefix(ptrace.SpanKindServer.String(), "SPAN_KIND_"))), datadogPayload.Traces[0].Spans[0].Name) // ensure that span.type is based on otlp span.kind assert.Equal(t, "web", datadogPayload.Traces[0].Spans[0].Type) @@ -330,8 +331,8 @@ func TestBasicTracesTranslation(t *testing.T) { assert.NotNil(t, datadogPayload.Traces[0].Spans[0].Start) assert.NotNil(t, datadogPayload.Traces[0].Spans[0].Duration) - pdataMockEndTime := pdata.NewTimestampFromTime(mockEndTime) - pdataMockStartTime := pdata.NewTimestampFromTime(mockEndTime.Add(-90 * time.Second)) + pdataMockEndTime := pcommon.NewTimestampFromTime(mockEndTime) + pdataMockStartTime := pcommon.NewTimestampFromTime(mockEndTime.Add(-90 * time.Second)) mockEventsString := fmt.Sprintf("[{\"attributes\":{},\"name\":\"start\",\"time\":%d},{\"attributes\":{\"flag\":false},\"name\":\"end\",\"time\":%d}]", pdataMockStartTime, pdataMockEndTime) // ensure that events tag is set if span events exist and contains structured json fields @@ -352,7 +353,7 @@ func TestBasicTracesDenylist(t *testing.T) { // create mock resource span data // set shouldError and resourceServiceandEnv to false to test defaut behavior - rs := NewResourceSpansData(mockTraceID, mockSpanID, mockParentSpanID, pdata.StatusCodeUnset, false, mockEndTime) + rs := NewResourceSpansData(mockTraceID, mockSpanID, mockParentSpanID, ptrace.StatusCodeUnset, false, mockEndTime) // translate mocks to datadog traces datadogPayload := resourceSpansToDatadogSpans(rs, hostname, &config.Config{}, denylister, map[string]string{}) @@ -378,7 +379,7 @@ func TestTracesTranslationErrorsAndResource(t *testing.T) { // create mock resource span data // toggle on errors and custom service naming to test edge case code paths - rs := NewResourceSpansData(mockTraceID, mockSpanID, mockParentSpanID, pdata.StatusCodeError, true, mockEndTime) + rs := NewResourceSpansData(mockTraceID, mockSpanID, mockParentSpanID, ptrace.StatusCodeError, true, mockEndTime) // translate mocks to datadog traces cfg := config.Config{ @@ -430,31 +431,31 @@ func TestTracesFallbackErrorMessage(t *testing.T) { mockSpanID := [8]byte{0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8} mockParentSpanID := [8]byte{0xEF, 0xEE, 0xED, 0xEC, 0xEB, 0xEA, 0xE9, 0xE8} mockEndTime := time.Now().Round(time.Second) - pdataEndTime := pdata.NewTimestampFromTime(mockEndTime) + pdataEndTime := pcommon.NewTimestampFromTime(mockEndTime) startTime := mockEndTime.Add(-90 * time.Second) - pdataStartTime := pdata.NewTimestampFromTime(startTime) + pdataStartTime := pcommon.NewTimestampFromTime(startTime) - rs := pdata.NewResourceSpans() + rs := ptrace.NewResourceSpans() ilss := rs.ScopeSpans() ils := ilss.AppendEmpty() ils.Scope().SetName("test_il_name") ils.Scope().SetVersion("test_il_version") span := ils.Spans().AppendEmpty() - traceID := pdata.NewTraceID(mockTraceID) - spanID := pdata.NewSpanID(mockSpanID) - parentSpanID := pdata.NewSpanID(mockParentSpanID) + traceID := pcommon.NewTraceID(mockTraceID) + spanID := pcommon.NewSpanID(mockSpanID) + parentSpanID := pcommon.NewSpanID(mockParentSpanID) span.SetTraceID(traceID) span.SetSpanID(spanID) span.SetParentSpanID(parentSpanID) span.SetName("End-To-End Here") - span.SetKind(pdata.SpanKindServer) + span.SetKind(ptrace.SpanKindServer) span.SetStartTimestamp(pdataStartTime) span.SetEndTimestamp(pdataEndTime) span.SetTraceState("tracestatekey=tracestatevalue") status := span.Status() - status.SetCode(pdata.StatusCodeError) + status.SetCode(ptrace.StatusCodeError) 
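TestTracesFallbackErrorMessage marks the span status as Error and, in the lines that follow, attaches http.status_code "404"; the getSpanErrorAndSetTags hunk earlier combines both signals (explicit Error status, any 5xx, or a 4xx on a client span). A sketch restating that rule with a plain bool return; the exporter itself returns the int32 okCode/errorCode constants, whose numeric values are not shown in this diff:

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/ptrace"
)

// isErrorSpan restates the status/HTTP rule from the translator hunk above:
// an explicit Error status, any 5xx response, or a 4xx on a client span is
// treated as an error. This is a simplified bool version, not the exporter's
// okCode/errorCode int32 form.
func isErrorSpan(s ptrace.Span, httpStatusCode int) bool {
	if s.Status().Code() == ptrace.StatusCodeError {
		return true
	}
	if httpStatusCode >= 500 {
		return true
	}
	if s.Kind() == ptrace.SpanKindClient && httpStatusCode >= 400 {
		return true
	}
	return false
}

func main() {
	span := ptrace.NewSpan()
	span.SetKind(ptrace.SpanKindClient)
	fmt.Println(isErrorSpan(span, 404)) // true: a 4xx only counts on client spans
}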
span.Attributes().InsertString(conventions.AttributeHTTPStatusCode, "404") span.Attributes().InsertString("http.status_text", "Not Found") @@ -489,7 +490,7 @@ func TestTracesTranslationErrorsFromEventsUsesLast(t *testing.T) { // create mock resource span data // toggle on errors and custom service naming to test edge case code paths - rs := NewResourceSpansData(mockTraceID, mockSpanID, mockParentSpanID, pdata.StatusCodeError, true, mockEndTime) + rs := NewResourceSpansData(mockTraceID, mockSpanID, mockParentSpanID, ptrace.StatusCodeError, true, mockEndTime) span := rs.ScopeSpans().At(0).Spans().At(0) events := span.Events() @@ -503,7 +504,7 @@ func TestTracesTranslationErrorsFromEventsUsesLast(t *testing.T) { event = events.AppendEmpty() event.SetName(AttributeExceptionEventName) - pdata.NewMapFromRaw(attribs).CopyTo(event.Attributes()) + pcommon.NewMapFromRaw(attribs).CopyTo(event.Attributes()) event = events.AppendEmpty() event.SetName("end") @@ -543,7 +544,7 @@ func TestTracesTranslationErrorsFromEventsBounds(t *testing.T) { // create mock resource span data // toggle on errors and custom service naming to test edge case code paths - rs := NewResourceSpansData(mockTraceID, mockSpanID, mockParentSpanID, pdata.StatusCodeError, true, mockEndTime) + rs := NewResourceSpansData(mockTraceID, mockSpanID, mockParentSpanID, ptrace.StatusCodeError, true, mockEndTime) span := rs.ScopeSpans().At(0).Spans().At(0) events := span.Events() events.EnsureCapacity(3) @@ -557,7 +558,7 @@ func TestTracesTranslationErrorsFromEventsBounds(t *testing.T) { evt := events.AppendEmpty() evt.SetName(AttributeExceptionEventName) - pdata.NewMapFromRaw(attribs).CopyTo(evt.Attributes()) + pcommon.NewMapFromRaw(attribs).CopyTo(evt.Attributes()) evt = events.AppendEmpty() evt.SetName("start") @@ -588,13 +589,13 @@ func TestTracesTranslationErrorsFromEventsBounds(t *testing.T) { // Now with the error event at the end of the list... 
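The errors-from-events tests above (continued just below) build exception events with pcommon.NewMapFromRaw and depend on the translator scanning events from the end: the extractErrorTagsFromEvents hunk earlier loops from evts.Len()-1 down to 0. A compact sketch of that pattern; the event name "exception" and the keys "exception.type"/"exception.message" follow the OpenTelemetry semantic conventions, and the attribute values here are illustrative only:

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/ptrace"
)

// lastExceptionType walks the span's events backwards, mirroring the reverse
// loop in extractErrorTagsFromEvents, and returns the exception.type of the
// most recent exception event, if one exists.
func lastExceptionType(s ptrace.Span) (string, bool) {
	evts := s.Events()
	for i := evts.Len() - 1; i >= 0; i-- {
		evt := evts.At(i)
		if evt.Name() != "exception" {
			continue
		}
		if v, ok := evt.Attributes().Get("exception.type"); ok {
			return v.AsString(), true
		}
	}
	return "", false
}

func main() {
	span := ptrace.NewSpan()
	evt := span.Events().AppendEmpty()
	evt.SetName("exception")
	pcommon.NewMapFromRaw(map[string]interface{}{
		"exception.type":    "HttpError",     // illustrative value
		"exception.message": "404 Not Found", // illustrative value
	}).CopyTo(evt.Attributes())
	fmt.Println(lastExceptionType(span)) // HttpError true
}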
events.At(0).SetName("start") // Reset the attributes - pdata.NewMap().CopyTo(events.At(0).Attributes()) + pcommon.NewMap().CopyTo(events.At(0).Attributes()) events.At(1).SetName("end") events.At(1).Attributes().InsertBool("flag", false) events.At(2).SetName(AttributeExceptionEventName) - pdata.NewMapFromRaw(attribs).CopyTo(events.At(2).Attributes()) + pcommon.NewMapFromRaw(attribs).CopyTo(events.At(2).Attributes()) // Ensure the error type is copied over assert.Equal(t, attribs[conventions.AttributeExceptionType].(string), datadogPayload.Traces[0].Spans[0].Meta[ext.ErrorType]) @@ -619,7 +620,7 @@ func TestTracesTranslationOkStatus(t *testing.T) { // create mock resource span data // toggle on errors and custom service naming to test edge case code paths - rs := NewResourceSpansData(mockTraceID, mockSpanID, mockParentSpanID, pdata.StatusCodeError, true, mockEndTime) + rs := NewResourceSpansData(mockTraceID, mockSpanID, mockParentSpanID, ptrace.StatusCodeError, true, mockEndTime) // translate mocks to datadog traces cfg := config.Config{ @@ -669,7 +670,7 @@ func TestTracesTranslationConfig(t *testing.T) { // create mock resource span data // toggle on errors and custom service naming to test edge case code paths - rs := NewResourceSpansData(mockTraceID, mockSpanID, mockParentSpanID, pdata.StatusCodeUnset, true, mockEndTime) + rs := NewResourceSpansData(mockTraceID, mockSpanID, mockParentSpanID, ptrace.StatusCodeUnset, true, mockEndTime) cfg := config.Config{ TagsConfig: config.TagsConfig{ @@ -707,7 +708,7 @@ func TestTracesTranslationNoIls(t *testing.T) { hostname := "testhostname" denylister := newDenylister([]string{}) - rs := pdata.NewResourceSpans() + rs := ptrace.NewResourceSpans() cfg := config.Config{ TagsConfig: config.TagsConfig{ @@ -740,7 +741,7 @@ func TestTracesTranslationInvalidService(t *testing.T) { // create mock resource span data // toggle on errors and custom service naming to test edge case code paths - rs := NewResourceSpansData(mockTraceID, mockSpanID, mockParentSpanID, pdata.StatusCodeUnset, false, mockEndTime) + rs := NewResourceSpansData(mockTraceID, mockSpanID, mockParentSpanID, ptrace.StatusCodeUnset, false, mockEndTime) // add a tab and an invalid character to see if it gets normalized cfgInvalidService := config.Config{ @@ -797,7 +798,7 @@ func TestTracesTranslationServicePeerName(t *testing.T) { // create mock resource span data // set shouldError and resourceServiceandEnv to false to test defaut behavior - rs := NewResourceSpansData(mockTraceID, mockSpanID, mockParentSpanID, pdata.StatusCodeUnset, false, mockEndTime) + rs := NewResourceSpansData(mockTraceID, mockSpanID, mockParentSpanID, ptrace.StatusCodeUnset, false, mockEndTime) span := rs.ScopeSpans().At(0).Spans().At(0) span.Attributes().InsertString(conventions.AttributePeerService, "my_peer_service_name") @@ -828,7 +829,7 @@ func TestTracesTranslationServicePeerName(t *testing.T) { assert.Equal(t, "End-To-End Here", datadogPayload.Traces[0].Spans[0].Resource) // ensure that span.name defaults to string representing instrumentation library if present - assert.Equal(t, strings.ToLower(fmt.Sprintf("%s.%s", datadogPayload.Traces[0].Spans[0].Meta[conventions.OtelLibraryName], strings.TrimPrefix(pdata.SpanKindServer.String(), "SPAN_KIND_"))), datadogPayload.Traces[0].Spans[0].Name) + assert.Equal(t, strings.ToLower(fmt.Sprintf("%s.%s", datadogPayload.Traces[0].Spans[0].Meta[conventions.OtelLibraryName], strings.TrimPrefix(ptrace.SpanKindServer.String(), "SPAN_KIND_"))), datadogPayload.Traces[0].Spans[0].Name) 
// ensure that span.type is based on otlp span.kind assert.Equal(t, "web", datadogPayload.Traces[0].Spans[0].Type) @@ -850,8 +851,8 @@ func TestTracesTranslationServicePeerName(t *testing.T) { assert.NotNil(t, datadogPayload.Traces[0].Spans[0].Start) assert.NotNil(t, datadogPayload.Traces[0].Spans[0].Duration) - pdataMockEndTime := pdata.NewTimestampFromTime(mockEndTime) - pdataMockStartTime := pdata.NewTimestampFromTime(mockEndTime.Add(-90 * time.Second)) + pdataMockEndTime := pcommon.NewTimestampFromTime(mockEndTime) + pdataMockStartTime := pcommon.NewTimestampFromTime(mockEndTime.Add(-90 * time.Second)) mockEventsString := fmt.Sprintf("[{\"attributes\":{},\"name\":\"start\",\"time\":%d},{\"attributes\":{\"flag\":false},\"name\":\"end\",\"time\":%d}]", pdataMockStartTime, pdataMockEndTime) // ensure that events tag is set if span events exist and contains structured json fields @@ -871,7 +872,7 @@ func TestTracesTranslationTruncatetag(t *testing.T) { // create mock resource span data // set shouldError and resourceServiceandEnv to false to test defaut behavior - rs := NewResourceSpansData(mockTraceID, mockSpanID, mockParentSpanID, pdata.StatusCodeUnset, false, mockEndTime) + rs := NewResourceSpansData(mockTraceID, mockSpanID, mockParentSpanID, ptrace.StatusCodeUnset, false, mockEndTime) span := rs.ScopeSpans().At(0).Spans().At(0) @@ -903,7 +904,7 @@ func TestTracesTranslationTruncatetag(t *testing.T) { assert.Equal(t, "End-To-End Here", datadogPayload.Traces[0].Spans[0].Resource) // ensure that span.name defaults to string representing instrumentation library if present - assert.Equal(t, strings.ToLower(fmt.Sprintf("%s.%s", datadogPayload.Traces[0].Spans[0].Meta[conventions.OtelLibraryName], strings.TrimPrefix(pdata.SpanKindServer.String(), "SPAN_KIND_"))), datadogPayload.Traces[0].Spans[0].Name) + assert.Equal(t, strings.ToLower(fmt.Sprintf("%s.%s", datadogPayload.Traces[0].Spans[0].Meta[conventions.OtelLibraryName], strings.TrimPrefix(ptrace.SpanKindServer.String(), "SPAN_KIND_"))), datadogPayload.Traces[0].Spans[0].Name) // ensure that span.type is based on otlp span.kind assert.Equal(t, "web", datadogPayload.Traces[0].Spans[0].Type) @@ -925,8 +926,8 @@ func TestTracesTranslationTruncatetag(t *testing.T) { assert.NotNil(t, datadogPayload.Traces[0].Spans[0].Start) assert.NotNil(t, datadogPayload.Traces[0].Spans[0].Duration) - pdataMockEndTime := pdata.NewTimestampFromTime(mockEndTime) - pdataMockStartTime := pdata.NewTimestampFromTime(mockEndTime.Add(-90 * time.Second)) + pdataMockEndTime := pcommon.NewTimestampFromTime(mockEndTime) + pdataMockStartTime := pcommon.NewTimestampFromTime(mockEndTime.Add(-90 * time.Second)) mockEventsString := fmt.Sprintf("[{\"attributes\":{},\"name\":\"start\",\"time\":%d},{\"attributes\":{\"flag\":false},\"name\":\"end\",\"time\":%d}]", pdataMockStartTime, pdataMockEndTime) // ensure that events tag is set if span events exist and contains structured json fields @@ -935,8 +936,8 @@ func TestTracesTranslationTruncatetag(t *testing.T) { // ensure that datadog span resource naming uses http method+route when available func TestSpanResourceTranslation(t *testing.T) { - span := pdata.NewSpan() - span.SetKind(pdata.SpanKindServer) + span := ptrace.NewSpan() + span.SetKind(ptrace.SpanKindServer) span.SetName("Default Name") ddHTTPTags := map[string]string{ @@ -958,8 +959,8 @@ func TestSpanResourceTranslation(t *testing.T) { // ensure that datadog span resource naming uses http method+ grpc path when available func TestSpanResourceTranslationGRPC(t *testing.T) { 
- span := pdata.NewSpan() - span.SetKind(pdata.SpanKindServer) + span := ptrace.NewSpan() + span.SetKind(ptrace.SpanKindServer) span.SetName("Default Name") ddHTTPTags := map[string]string{ @@ -981,8 +982,8 @@ func TestSpanResourceTranslationGRPC(t *testing.T) { // ensure that datadog span resource naming uses messaging operation+destination when available func TestSpanResourceTranslationMessaging(t *testing.T) { - span := pdata.NewSpan() - span.SetKind(pdata.SpanKindServer) + span := ptrace.NewSpan() + span.SetKind(ptrace.SpanKindServer) span.SetName("Default Name") ddHTTPTags := map[string]string{ @@ -1004,8 +1005,8 @@ func TestSpanResourceTranslationMessaging(t *testing.T) { // ensure that datadog span resource naming uses messaging operation even when destination is not available func TestSpanResourceTranslationMessagingFallback(t *testing.T) { - span := pdata.NewSpan() - span.SetKind(pdata.SpanKindServer) + span := ptrace.NewSpan() + span.SetKind(ptrace.SpanKindServer) span.SetName("Default Name") ddHTTPTags := map[string]string{ @@ -1026,8 +1027,8 @@ func TestSpanResourceTranslationMessagingFallback(t *testing.T) { // ensure that datadog span resource naming uses rpc method + rpc service when available func TestSpanResourceTranslationRpc(t *testing.T) { - span := pdata.NewSpan() - span.SetKind(pdata.SpanKindServer) + span := ptrace.NewSpan() + span.SetKind(ptrace.SpanKindServer) span.SetName("Default Name") ddHTTPTags := map[string]string{ @@ -1049,8 +1050,8 @@ func TestSpanResourceTranslationRpc(t *testing.T) { // ensure that datadog span resource naming uses rpc method even when rpc service is not available func TestSpanResourceTranslationRpcFallback(t *testing.T) { - span := pdata.NewSpan() - span.SetKind(pdata.SpanKindServer) + span := ptrace.NewSpan() + span.SetKind(ptrace.SpanKindServer) span.SetName("Default Name") ddHTTPTags := map[string]string{ @@ -1071,9 +1072,9 @@ func TestSpanResourceTranslationRpcFallback(t *testing.T) { // ensure that the datadog span name uses IL name +kind when available and falls back to opetelemetry + kind func TestSpanNameTranslation(t *testing.T) { - span := pdata.NewSpan() + span := ptrace.NewSpan() span.SetName("Default Name") - span.SetKind(pdata.SpanKindServer) + span.SetKind(ptrace.SpanKindServer) ddIlTags := map[string]string{ fmt.Sprintf(conventions.OtelLibraryName): "il_name", @@ -1106,12 +1107,12 @@ func TestSpanNameTranslation(t *testing.T) { spanNameUnusual := getDatadogSpanName(span, ddIlTagsUnusual) spanNameHyphen := getDatadogSpanName(span, ddIlTagsHyphen) - assert.Equal(t, strings.ToLower(fmt.Sprintf("%s.%s", "il_name", strings.TrimPrefix(pdata.SpanKindServer.String(), "SPAN_KIND_"))), spanNameIl) - assert.Equal(t, strings.ToLower(fmt.Sprintf("%s.%s", "opentelemetry", strings.TrimPrefix(pdata.SpanKindServer.String(), "SPAN_KIND_"))), spanNameDefault) - assert.Equal(t, strings.ToLower(fmt.Sprintf("%s.%s", "old_value", strings.TrimPrefix(pdata.SpanKindServer.String(), "SPAN_KIND_"))), spanNameOld) - assert.Equal(t, strings.ToLower(fmt.Sprintf("%s.%s", "current_value", strings.TrimPrefix(pdata.SpanKindServer.String(), "SPAN_KIND_"))), spanNameCur) - assert.Equal(t, strings.ToLower(fmt.Sprintf("%s.%s", "unusual_value", strings.TrimPrefix(pdata.SpanKindServer.String(), "SPAN_KIND_"))), spanNameUnusual) - assert.Equal(t, strings.ToLower(fmt.Sprintf("%s.%s", "hyphenated_value", strings.TrimPrefix(pdata.SpanKindServer.String(), "SPAN_KIND_"))), spanNameHyphen) + assert.Equal(t, strings.ToLower(fmt.Sprintf("%s.%s", "il_name", 
strings.TrimPrefix(ptrace.SpanKindServer.String(), "SPAN_KIND_"))), spanNameIl) + assert.Equal(t, strings.ToLower(fmt.Sprintf("%s.%s", "opentelemetry", strings.TrimPrefix(ptrace.SpanKindServer.String(), "SPAN_KIND_"))), spanNameDefault) + assert.Equal(t, strings.ToLower(fmt.Sprintf("%s.%s", "old_value", strings.TrimPrefix(ptrace.SpanKindServer.String(), "SPAN_KIND_"))), spanNameOld) + assert.Equal(t, strings.ToLower(fmt.Sprintf("%s.%s", "current_value", strings.TrimPrefix(ptrace.SpanKindServer.String(), "SPAN_KIND_"))), spanNameCur) + assert.Equal(t, strings.ToLower(fmt.Sprintf("%s.%s", "unusual_value", strings.TrimPrefix(ptrace.SpanKindServer.String(), "SPAN_KIND_"))), spanNameUnusual) + assert.Equal(t, strings.ToLower(fmt.Sprintf("%s.%s", "hyphenated_value", strings.TrimPrefix(ptrace.SpanKindServer.String(), "SPAN_KIND_"))), spanNameHyphen) } // ensure that the datadog span name uses IL name +kind when available and falls back to opetelemetry + kind @@ -1145,9 +1146,9 @@ func TestSpanNameNormalization(t *testing.T) { // ensure that the datadog span type gets mapped from span kind func TestSpanTypeTranslation(t *testing.T) { - spanTypeClient := inferDatadogType(pdata.SpanKindClient, map[string]string{}) - spanTypeServer := inferDatadogType(pdata.SpanKindServer, map[string]string{}) - spanTypeCustom := inferDatadogType(pdata.SpanKindUnspecified, map[string]string{}) + spanTypeClient := inferDatadogType(ptrace.SpanKindClient, map[string]string{}) + spanTypeServer := inferDatadogType(ptrace.SpanKindServer, map[string]string{}) + spanTypeCustom := inferDatadogType(ptrace.SpanKindUnspecified, map[string]string{}) ddTagsDb := map[string]string{ "db.system": "postgresql", @@ -1161,9 +1162,9 @@ func TestSpanTypeTranslation(t *testing.T) { "db.system": "memcached", } - spanTypeDb := inferDatadogType(pdata.SpanKindClient, ddTagsDb) - spanTypeCache := inferDatadogType(pdata.SpanKindClient, ddTagsCache) - spanTypeCacheAlt := inferDatadogType(pdata.SpanKindClient, ddTagsCacheAlt) + spanTypeDb := inferDatadogType(ptrace.SpanKindClient, ddTagsDb) + spanTypeCache := inferDatadogType(ptrace.SpanKindClient, ddTagsCache) + spanTypeCacheAlt := inferDatadogType(ptrace.SpanKindClient, ddTagsCacheAlt) assert.Equal(t, "http", spanTypeClient) assert.Equal(t, "web", spanTypeServer) @@ -1175,7 +1176,7 @@ func TestSpanTypeTranslation(t *testing.T) { // ensure that the IL Tags extraction handles nil case func TestILTagsExctraction(t *testing.T) { - il := pdata.NewInstrumentationScope() + il := pcommon.NewInstrumentationScope() tags := map[string]string{} @@ -1186,15 +1187,15 @@ func TestILTagsExctraction(t *testing.T) { } func TestHttpResourceTag(t *testing.T) { - span := pdata.NewSpan() + span := ptrace.NewSpan() span.SetName("Default Name") - span.SetKind(pdata.SpanKindServer) + span.SetKind(ptrace.SpanKindServer) ddTags := map[string]string{ "http.method": "POST", } - resourceName := getDatadogResourceName(pdata.Span{}, ddTags) + resourceName := getDatadogResourceName(ptrace.Span{}, ddTags) assert.Equal(t, "POST", resourceName) } @@ -1268,7 +1269,7 @@ func TestStatsAggregations(t *testing.T) { // create mock resource span data // toggle on errors and custom service naming to test edge case code paths - rs := NewResourceSpansData(mockTraceID, mockSpanID, mockParentSpanID, pdata.StatusCodeError, true, mockEndTime) + rs := NewResourceSpansData(mockTraceID, mockSpanID, mockParentSpanID, ptrace.StatusCodeError, true, mockEndTime) // translate mocks to datadog traces cfg := config.Config{} @@ -1310,7 +1311,7 @@ func 
TestSamplingWeightedStatsAggregations(t *testing.T) { // create mock resource span data // toggle on errors and custom service naming to test edge case code paths - rs := NewResourceSpansData(mockTraceID, mockSpanID, mockParentSpanID, pdata.StatusCodeError, true, mockEndTime) + rs := NewResourceSpansData(mockTraceID, mockSpanID, mockParentSpanID, ptrace.StatusCodeError, true, mockEndTime) rs.ScopeSpans().EnsureCapacity(1) ilss := rs.ScopeSpans().AppendEmpty() @@ -1319,7 +1320,7 @@ func TestSamplingWeightedStatsAggregations(t *testing.T) { instrumentationLibrary.SetVersion("v1") span := ilss.Spans().AppendEmpty() span.Attributes().InsertString("_sample_rate", "0.2") - span.SetKind(pdata.SpanKindServer) + span.SetKind(ptrace.SpanKindServer) // translate mocks to datadog traces cfg := config.Config{} @@ -1349,7 +1350,7 @@ func TestSanitization(t *testing.T) { Version: "1.0", } - traces := pdata.NewTraces() + traces := ptrace.NewTraces() traces.ResourceSpans().EnsureCapacity(1) rs := traces.ResourceSpans().AppendEmpty() resource := rs.Resource() @@ -1438,16 +1439,16 @@ func TestSpanNameMapping(t *testing.T) { mockSpanID := [8]byte{0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8} mockParentSpanID := [8]byte{0xEF, 0xEE, 0xED, 0xEC, 0xEB, 0xEA, 0xE9, 0xE8} endTime := time.Now().Round(time.Second) - pdataEndTime := pdata.NewTimestampFromTime(endTime) + pdataEndTime := pcommon.NewTimestampFromTime(endTime) startTime := endTime.Add(-90 * time.Second) - pdataStartTime := pdata.NewTimestampFromTime(startTime) + pdataStartTime := pcommon.NewTimestampFromTime(startTime) denylister := newDenylister([]string{}) buildInfo := component.BuildInfo{ Version: "1.0", } - traces := pdata.NewTraces() + traces := ptrace.NewTraces() traces.ResourceSpans().EnsureCapacity(1) rs := traces.ResourceSpans().AppendEmpty() resource := rs.Resource() @@ -1459,14 +1460,14 @@ func TestSpanNameMapping(t *testing.T) { instrumentationLibrary.SetVersion("v1") span := ilss.Spans().AppendEmpty() - traceID := pdata.NewTraceID(mockTraceID) - spanID := pdata.NewSpanID(mockSpanID) - parentSpanID := pdata.NewSpanID(mockParentSpanID) + traceID := pcommon.NewTraceID(mockTraceID) + spanID := pcommon.NewSpanID(mockSpanID) + parentSpanID := pcommon.NewSpanID(mockParentSpanID) span.SetTraceID(traceID) span.SetSpanID(spanID) span.SetParentSpanID(parentSpanID) span.SetName("End-To-End Here") - span.SetKind(pdata.SpanKindServer) + span.SetKind(ptrace.SpanKindServer) span.SetStartTimestamp(pdataStartTime) span.SetEndTimestamp(pdataEndTime) @@ -1488,16 +1489,16 @@ func TestSpanEnvClobbering(t *testing.T) { mockSpanID := [8]byte{0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8} mockParentSpanID := [8]byte{0xEF, 0xEE, 0xED, 0xEC, 0xEB, 0xEA, 0xE9, 0xE8} endTime := time.Now().Round(time.Second) - pdataEndTime := pdata.NewTimestampFromTime(endTime) + pdataEndTime := pcommon.NewTimestampFromTime(endTime) startTime := endTime.Add(-90 * time.Second) - pdataStartTime := pdata.NewTimestampFromTime(startTime) + pdataStartTime := pcommon.NewTimestampFromTime(startTime) denylister := newDenylister([]string{}) buildInfo := component.BuildInfo{ Version: "1.0", } - traces := pdata.NewTraces() + traces := ptrace.NewTraces() traces.ResourceSpans().EnsureCapacity(1) rs := traces.ResourceSpans().AppendEmpty() resource := rs.Resource() @@ -1511,14 +1512,14 @@ func TestSpanEnvClobbering(t *testing.T) { instrumentationLibrary.SetVersion("v1") span := ilss.Spans().AppendEmpty() - traceID := pdata.NewTraceID(mockTraceID) - spanID := pdata.NewSpanID(mockSpanID) - parentSpanID 
:= pdata.NewSpanID(mockParentSpanID) + traceID := pcommon.NewTraceID(mockTraceID) + spanID := pcommon.NewSpanID(mockSpanID) + parentSpanID := pcommon.NewSpanID(mockParentSpanID) span.SetTraceID(traceID) span.SetSpanID(spanID) span.SetParentSpanID(parentSpanID) span.SetName("End-To-End Here") - span.SetKind(pdata.SpanKindServer) + span.SetKind(ptrace.SpanKindServer) span.SetStartTimestamp(pdataStartTime) span.SetEndTimestamp(pdataEndTime) @@ -1535,16 +1536,16 @@ func TestSpanRateLimitTag(t *testing.T) { mockSpanID := [8]byte{0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8} mockParentSpanID := [8]byte{0xEF, 0xEE, 0xED, 0xEC, 0xEB, 0xEA, 0xE9, 0xE8} endTime := time.Now().Round(time.Second) - pdataEndTime := pdata.NewTimestampFromTime(endTime) + pdataEndTime := pcommon.NewTimestampFromTime(endTime) startTime := endTime.Add(-90 * time.Second) - pdataStartTime := pdata.NewTimestampFromTime(startTime) + pdataStartTime := pcommon.NewTimestampFromTime(startTime) denylister := newDenylister([]string{}) buildInfo := component.BuildInfo{ Version: "1.0", } - traces := pdata.NewTraces() + traces := ptrace.NewTraces() traces.ResourceSpans().EnsureCapacity(1) rs := traces.ResourceSpans().AppendEmpty() resource := rs.Resource() @@ -1561,16 +1562,16 @@ func TestSpanRateLimitTag(t *testing.T) { "_sample_rate": "0.5", } - pdata.NewMapFromRaw(attribs).CopyTo(span.Attributes()) + pcommon.NewMapFromRaw(attribs).CopyTo(span.Attributes()) - traceID := pdata.NewTraceID(mockTraceID) - spanID := pdata.NewSpanID(mockSpanID) - parentSpanID := pdata.NewSpanID(mockParentSpanID) + traceID := pcommon.NewTraceID(mockTraceID) + spanID := pcommon.NewSpanID(mockSpanID) + parentSpanID := pcommon.NewSpanID(mockParentSpanID) span.SetTraceID(traceID) span.SetSpanID(spanID) span.SetParentSpanID(parentSpanID) span.SetName("End-To-End Here") - span.SetKind(pdata.SpanKindServer) + span.SetKind(ptrace.SpanKindServer) span.SetStartTimestamp(pdataStartTime) span.SetEndTimestamp(pdataEndTime) @@ -1592,7 +1593,7 @@ func TestTracesSpanNamingOption(t *testing.T) { // create mock resource span data // toggle on errors and custom service naming to test edge case code paths - rs := NewResourceSpansData(mockTraceID, mockSpanID, mockParentSpanID, pdata.StatusCodeUnset, false, mockEndTime) + rs := NewResourceSpansData(mockTraceID, mockSpanID, mockParentSpanID, ptrace.StatusCodeUnset, false, mockEndTime) // start with span name as resource name set to true cfgSpanNameAsResourceName := config.Config{ diff --git a/exporter/dynatraceexporter/go.mod b/exporter/dynatraceexporter/go.mod index 7ad28bd53619..4c1f0e696f09 100644 --- a/exporter/dynatraceexporter/go.mod +++ b/exporter/dynatraceexporter/go.mod @@ -7,13 +7,13 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.48.0 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.48.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/zap v1.21.0 ) require ( - github.com/cenkalti/backoff/v4 v4.1.2 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/felixge/httpsnoop v1.0.2 // indirect github.com/go-logr/logr v1.2.3 // indirect @@ -22,13 +22,12 @@ require ( 
github.com/golang/protobuf v1.5.2 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/klauspost/compress v1.15.1 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rs/cors v1.8.2 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect @@ -36,10 +35,8 @@ require ( go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect - golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect golang.org/x/text v0.3.7 // indirect - google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect google.golang.org/grpc v1.45.0 // indirect google.golang.org/protobuf v1.28.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect @@ -51,3 +48,5 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/commo replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry => ../../pkg/resourcetotelemetry replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal => ../../internal/coreinternal + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/exporter/dynatraceexporter/go.sum b/exporter/dynatraceexporter/go.sum index 1f916fdec71b..3d0a9802185f 100644 --- a/exporter/dynatraceexporter/go.sum +++ b/exporter/dynatraceexporter/go.sum @@ -1,7 +1,6 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= @@ -18,16 +17,14 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= -github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod 
h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= @@ -40,7 +37,6 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= @@ -93,7 +89,6 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -125,8 +120,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/knadh/koanf v1.4.0 
h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -168,9 +163,6 @@ github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -186,10 +178,10 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0/go.mod h1:PFmBsWbldL1kiWZk9+0LBZz2brhByaGsvp6pRICMlPE= @@ -200,7 +192,7 @@ go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1 go.opentelemetry.io/otel/metric v0.28.0/go.mod h1:TrzsfQAmQaB1PDcdhBauLMk7nyyg9hm+GoQq/ekE9Iw= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= 
-go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.0/go.mod h1:qs7BrU5cZ8dXQHBGxHMOxwME/27YH2qEp4/+tZLLwJE= go.opentelemetry.io/otel/trace v1.6.1/go.mod h1:RkFRM1m0puWIq10oxImnGEduNBzxiN7TXluRBtE+5j0= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= @@ -239,8 +231,8 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -264,13 +256,11 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -296,7 +286,6 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0= -google.golang.org/genproto 
v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= @@ -306,7 +295,6 @@ google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8 google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -320,7 +308,6 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= diff --git a/exporter/dynatraceexporter/internal/serialization/gauge.go b/exporter/dynatraceexporter/internal/serialization/gauge.go index 4f6c79040f0b..b8cbb28b3fc7 100644 --- a/exporter/dynatraceexporter/internal/serialization/gauge.go +++ b/exporter/dynatraceexporter/internal/serialization/gauge.go @@ -19,18 +19,18 @@ import ( dtMetric "github.com/dynatrace-oss/dynatrace-metric-utils-go/metric" "github.com/dynatrace-oss/dynatrace-metric-utils-go/metric/dimensions" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" ) -func serializeGauge(name, prefix string, dims dimensions.NormalizedDimensionList, dp pdata.NumberDataPoint) (string, error) { +func serializeGauge(name, prefix string, dims dimensions.NormalizedDimensionList, dp pmetric.NumberDataPoint) (string, error) { var metricOption dtMetric.MetricOption switch dp.ValueType() { - case pdata.MetricValueTypeNone: + case pmetric.MetricValueTypeNone: return "", fmt.Errorf("unsupported value type none") - case pdata.MetricValueTypeInt: + case pmetric.MetricValueTypeInt: metricOption = dtMetric.WithIntGaugeValue(dp.IntVal()) - case pdata.MetricValueTypeDouble: + case pmetric.MetricValueTypeDouble: metricOption = dtMetric.WithFloatGaugeValue(dp.DoubleVal()) default: return "", fmt.Errorf("unknown data type") diff --git a/exporter/dynatraceexporter/internal/serialization/gauge_test.go b/exporter/dynatraceexporter/internal/serialization/gauge_test.go index 976693f606fd..819125db61d4 100644 --- a/exporter/dynatraceexporter/internal/serialization/gauge_test.go +++ b/exporter/dynatraceexporter/internal/serialization/gauge_test.go @@ -20,14 +20,15 @@ import ( 
"github.com/dynatrace-oss/dynatrace-metric-utils-go/metric/dimensions" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" ) func Test_serializeGauge(t *testing.T) { t.Run("float with prefix and dimension", func(t *testing.T) { - dp := pdata.NewNumberDataPoint() + dp := pmetric.NewNumberDataPoint() dp.SetDoubleVal(5.5) - dp.SetTimestamp(pdata.Timestamp(time.Date(2021, 07, 16, 12, 30, 0, 0, time.UTC).UnixNano())) + dp.SetTimestamp(pcommon.Timestamp(time.Date(2021, 07, 16, 12, 30, 0, 0, time.UTC).UnixNano())) got, err := serializeGauge("dbl_gauge", "prefix", dimensions.NewNormalizedDimensionList(dimensions.NewDimension("key", "value")), dp) assert.NoError(t, err) @@ -35,9 +36,9 @@ func Test_serializeGauge(t *testing.T) { }) t.Run("int with prefix and dimension", func(t *testing.T) { - dp := pdata.NewNumberDataPoint() + dp := pmetric.NewNumberDataPoint() dp.SetIntVal(5) - dp.SetTimestamp(pdata.Timestamp(time.Date(2021, 07, 16, 12, 30, 0, 0, time.UTC).UnixNano())) + dp.SetTimestamp(pcommon.Timestamp(time.Date(2021, 07, 16, 12, 30, 0, 0, time.UTC).UnixNano())) got, err := serializeGauge("int_gauge", "prefix", dimensions.NewNormalizedDimensionList(dimensions.NewDimension("key", "value")), dp) assert.NoError(t, err) @@ -45,7 +46,7 @@ func Test_serializeGauge(t *testing.T) { }) t.Run("without timestamp", func(t *testing.T) { - dp := pdata.NewNumberDataPoint() + dp := pmetric.NewNumberDataPoint() dp.SetIntVal(5) got, err := serializeGauge("int_gauge", "prefix", dimensions.NewNormalizedDimensionList(), dp) diff --git a/exporter/dynatraceexporter/internal/serialization/histogram.go b/exporter/dynatraceexporter/internal/serialization/histogram.go index 9164739d0e14..7d8b7eea56f6 100644 --- a/exporter/dynatraceexporter/internal/serialization/histogram.go +++ b/exporter/dynatraceexporter/internal/serialization/histogram.go @@ -19,11 +19,11 @@ import ( dtMetric "github.com/dynatrace-oss/dynatrace-metric-utils-go/metric" "github.com/dynatrace-oss/dynatrace-metric-utils-go/metric/dimensions" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" ) -func serializeHistogram(name, prefix string, dims dimensions.NormalizedDimensionList, t pdata.MetricAggregationTemporality, dp pdata.HistogramDataPoint) (string, error) { - if t == pdata.MetricAggregationTemporalityCumulative { +func serializeHistogram(name, prefix string, dims dimensions.NormalizedDimensionList, t pmetric.MetricAggregationTemporality, dp pmetric.HistogramDataPoint) (string, error) { + if t == pmetric.MetricAggregationTemporalityCumulative { // convert to delta histogram // skip first point because there is nothing to calculate a delta from // what if bucket bounds change @@ -59,7 +59,7 @@ func serializeHistogram(name, prefix string, dims dimensions.NormalizedDimension } // estimateHistMinMax returns the estimated minimum and maximum value in the histogram by using the min and max non-empty buckets. 
-func estimateHistMinMax(dp pdata.HistogramDataPoint) (float64, float64) { +func estimateHistMinMax(dp pmetric.HistogramDataPoint) (float64, float64) { bounds := dp.ExplicitBounds() counts := dp.BucketCounts() diff --git a/exporter/dynatraceexporter/internal/serialization/histogram_test.go b/exporter/dynatraceexporter/internal/serialization/histogram_test.go index 6e62b86f9a9a..9d1f0862ef9a 100644 --- a/exporter/dynatraceexporter/internal/serialization/histogram_test.go +++ b/exporter/dynatraceexporter/internal/serialization/histogram_test.go @@ -20,38 +20,39 @@ import ( "github.com/dynatrace-oss/dynatrace-metric-utils-go/metric/dimensions" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" ) func Test_serializeHistogram(t *testing.T) { - hist := pdata.NewHistogramDataPoint() + hist := pmetric.NewHistogramDataPoint() hist.SetExplicitBounds([]float64{0, 2, 4, 8}) hist.SetBucketCounts([]uint64{0, 1, 0, 1, 0}) hist.SetCount(2) hist.SetSum(9.5) - hist.SetTimestamp(pdata.Timestamp(time.Date(2021, 07, 16, 12, 30, 0, 0, time.UTC).UnixNano())) + hist.SetTimestamp(pcommon.Timestamp(time.Date(2021, 07, 16, 12, 30, 0, 0, time.UTC).UnixNano())) - histWithNonEmptyFirstLast := pdata.NewHistogramDataPoint() + histWithNonEmptyFirstLast := pmetric.NewHistogramDataPoint() histWithNonEmptyFirstLast.SetExplicitBounds([]float64{0, 2, 4, 8}) histWithNonEmptyFirstLast.SetBucketCounts([]uint64{0, 1, 0, 1, 1}) histWithNonEmptyFirstLast.SetCount(3) histWithNonEmptyFirstLast.SetSum(9.5) - histWithNonEmptyFirstLast.SetTimestamp(pdata.Timestamp(time.Date(2021, 07, 16, 12, 30, 0, 0, time.UTC).UnixNano())) + histWithNonEmptyFirstLast.SetTimestamp(pcommon.Timestamp(time.Date(2021, 07, 16, 12, 30, 0, 0, time.UTC).UnixNano())) t.Run("delta with prefix and dimension", func(t *testing.T) { - got, err := serializeHistogram("delta_hist", "prefix", dimensions.NewNormalizedDimensionList(dimensions.NewDimension("key", "value")), pdata.MetricAggregationTemporalityDelta, hist) + got, err := serializeHistogram("delta_hist", "prefix", dimensions.NewNormalizedDimensionList(dimensions.NewDimension("key", "value")), pmetric.MetricAggregationTemporalityDelta, hist) assert.NoError(t, err) assert.Equal(t, "prefix.delta_hist,key=value gauge,min=0,max=8,sum=9.5,count=2 1626438600000", got) }) t.Run("delta with non-empty first and last bucket", func(t *testing.T) { - got, err := serializeHistogram("delta_nonempty_first_last_hist", "prefix", dimensions.NewNormalizedDimensionList(dimensions.NewDimension("key", "value")), pdata.MetricAggregationTemporalityDelta, histWithNonEmptyFirstLast) + got, err := serializeHistogram("delta_nonempty_first_last_hist", "prefix", dimensions.NewNormalizedDimensionList(dimensions.NewDimension("key", "value")), pmetric.MetricAggregationTemporalityDelta, histWithNonEmptyFirstLast) assert.NoError(t, err) assert.Equal(t, "prefix.delta_nonempty_first_last_hist,key=value gauge,min=0,max=8,sum=9.5,count=3 1626438600000", got) }) t.Run("cumulative with prefix and dimension", func(t *testing.T) { - got, err := serializeHistogram("hist", "prefix", dimensions.NewNormalizedDimensionList(dimensions.NewDimension("key", "value")), pdata.MetricAggregationTemporalityCumulative, hist) + got, err := serializeHistogram("hist", "prefix", dimensions.NewNormalizedDimensionList(dimensions.NewDimension("key", "value")), pmetric.MetricAggregationTemporalityCumulative, hist) assert.Error(t, err) 
assert.Equal(t, "", got) }) diff --git a/exporter/dynatraceexporter/internal/serialization/serialization.go b/exporter/dynatraceexporter/internal/serialization/serialization.go index 7760c90b32ec..d92defd75fc8 100644 --- a/exporter/dynatraceexporter/internal/serialization/serialization.go +++ b/exporter/dynatraceexporter/internal/serialization/serialization.go @@ -18,20 +18,21 @@ import ( "fmt" "github.com/dynatrace-oss/dynatrace-metric-utils-go/metric/dimensions" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/ttlmap" ) -func SerializeMetric(logger *zap.Logger, prefix string, metric pdata.Metric, defaultDimensions, staticDimensions dimensions.NormalizedDimensionList, prev *ttlmap.TTLMap) ([]string, error) { +func SerializeMetric(logger *zap.Logger, prefix string, metric pmetric.Metric, defaultDimensions, staticDimensions dimensions.NormalizedDimensionList, prev *ttlmap.TTLMap) ([]string, error) { var metricLines []string ce := logger.Check(zap.DebugLevel, "SerializeMetric") var points int switch metric.DataType() { - case pdata.MetricDataTypeGauge: + case pmetric.MetricDataTypeGauge: points = metric.Gauge().DataPoints().Len() for i := 0; i < metric.Gauge().DataPoints().Len(); i++ { dp := metric.Gauge().DataPoints().At(i) @@ -51,7 +52,7 @@ func SerializeMetric(logger *zap.Logger, prefix string, metric pdata.Metric, def metricLines = append(metricLines, line) } } - case pdata.MetricDataTypeSum: + case pmetric.MetricDataTypeSum: points = metric.Sum().DataPoints().Len() for i := 0; i < metric.Sum().DataPoints().Len(); i++ { dp := metric.Sum().DataPoints().At(i) @@ -73,7 +74,7 @@ func SerializeMetric(logger *zap.Logger, prefix string, metric pdata.Metric, def metricLines = append(metricLines, line) } } - case pdata.MetricDataTypeHistogram: + case pmetric.MetricDataTypeHistogram: points = metric.Histogram().DataPoints().Len() for i := 0; i < metric.Histogram().DataPoints().Len(); i++ { dp := metric.Histogram().DataPoints().At(i) @@ -105,10 +106,10 @@ func SerializeMetric(logger *zap.Logger, prefix string, metric pdata.Metric, def return metricLines, nil } -func makeCombinedDimensions(labels pdata.Map, defaultDimensions, staticDimensions dimensions.NormalizedDimensionList) dimensions.NormalizedDimensionList { +func makeCombinedDimensions(labels pcommon.Map, defaultDimensions, staticDimensions dimensions.NormalizedDimensionList) dimensions.NormalizedDimensionList { dimsFromLabels := []dimensions.Dimension{} - labels.Range(func(k string, v pdata.Value) bool { + labels.Range(func(k string, v pcommon.Value) bool { dimsFromLabels = append(dimsFromLabels, dimensions.NewDimension(k, v.AsString())) return true }) diff --git a/exporter/dynatraceexporter/internal/serialization/sum.go b/exporter/dynatraceexporter/internal/serialization/sum.go index 5554ae557aed..cdc0238d278e 100644 --- a/exporter/dynatraceexporter/internal/serialization/sum.go +++ b/exporter/dynatraceexporter/internal/serialization/sum.go @@ -19,34 +19,35 @@ import ( dtMetric "github.com/dynatrace-oss/dynatrace-metric-utils-go/metric" "github.com/dynatrace-oss/dynatrace-metric-utils-go/metric/dimensions" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" 
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/ttlmap" ) -func serializeSum(name, prefix string, dims dimensions.NormalizedDimensionList, t pdata.MetricAggregationTemporality, dp pdata.NumberDataPoint, prev *ttlmap.TTLMap) (string, error) { +func serializeSum(name, prefix string, dims dimensions.NormalizedDimensionList, t pmetric.MetricAggregationTemporality, dp pmetric.NumberDataPoint, prev *ttlmap.TTLMap) (string, error) { switch t { - case pdata.MetricAggregationTemporalityCumulative: + case pmetric.MetricAggregationTemporalityCumulative: return serializeCumulativeCounter(name, prefix, dims, dp, prev) // for now unspecified is treated as delta - case pdata.MetricAggregationTemporalityUnspecified: + case pmetric.MetricAggregationTemporalityUnspecified: fallthrough - case pdata.MetricAggregationTemporalityDelta: + case pmetric.MetricAggregationTemporalityDelta: return serializeDeltaCounter(name, prefix, dims, dp) } return "", nil } -func serializeDeltaCounter(name, prefix string, dims dimensions.NormalizedDimensionList, dp pdata.NumberDataPoint) (string, error) { +func serializeDeltaCounter(name, prefix string, dims dimensions.NormalizedDimensionList, dp pmetric.NumberDataPoint) (string, error) { var valueOpt dtMetric.MetricOption switch dp.ValueType() { - case pdata.MetricValueTypeNone: + case pmetric.MetricValueTypeNone: return "", fmt.Errorf("unsupported value type none") - case pdata.MetricValueTypeInt: + case pmetric.MetricValueTypeInt: valueOpt = dtMetric.WithIntCounterValueDelta(dp.IntVal()) - case pdata.MetricValueTypeDouble: + case pmetric.MetricValueTypeDouble: valueOpt = dtMetric.WithFloatCounterValueDelta(dp.DoubleVal()) default: return "", fmt.Errorf("unknown data type") @@ -67,7 +68,7 @@ func serializeDeltaCounter(name, prefix string, dims dimensions.NormalizedDimens return dm.Serialize() } -func serializeCumulativeCounter(name, prefix string, dims dimensions.NormalizedDimensionList, dp pdata.NumberDataPoint, prev *ttlmap.TTLMap) (string, error) { +func serializeCumulativeCounter(name, prefix string, dims dimensions.NormalizedDimensionList, dp pmetric.NumberDataPoint, prev *ttlmap.TTLMap) (string, error) { dm, err := convertTotalCounterToDelta(name, prefix, dims, dp, prev) if err != nil { @@ -81,10 +82,10 @@ func serializeCumulativeCounter(name, prefix string, dims dimensions.NormalizedD return dm.Serialize() } -func convertTotalCounterToDelta(name, prefix string, dims dimensions.NormalizedDimensionList, dp pdata.NumberDataPoint, prevCounters *ttlmap.TTLMap) (*dtMetric.Metric, error) { +func convertTotalCounterToDelta(name, prefix string, dims dimensions.NormalizedDimensionList, dp pmetric.NumberDataPoint, prevCounters *ttlmap.TTLMap) (*dtMetric.Metric, error) { id := name - dp.Attributes().Sort().Range(func(k string, v pdata.Value) bool { + dp.Attributes().Sort().Range(func(k string, v pcommon.Value) bool { id += fmt.Sprintf(",%s=%s", k, v.AsString()) return true }) @@ -96,7 +97,7 @@ func convertTotalCounterToDelta(name, prefix string, dims dimensions.NormalizedD return nil, nil } - oldCount := prevCounter.(pdata.NumberDataPoint) + oldCount := prevCounter.(pmetric.NumberDataPoint) if oldCount.Timestamp().AsTime().After(dp.Timestamp().AsTime()) { // current point is older than the previous point @@ -110,9 +111,9 @@ func convertTotalCounterToDelta(name, prefix string, dims dimensions.NormalizedD return nil, fmt.Errorf("expected %s to be type %s but got %s - count reset", name, metricValueTypeToString(oldCount.ValueType()), 
metricValueTypeToString(dp.ValueType())) } - if dp.ValueType() == pdata.MetricValueTypeInt { + if dp.ValueType() == pmetric.MetricValueTypeInt { valueOpt = dtMetric.WithIntCounterValueDelta(dp.IntVal() - oldCount.IntVal()) - } else if dp.ValueType() == pdata.MetricValueTypeDouble { + } else if dp.ValueType() == pmetric.MetricValueTypeDouble { valueOpt = dtMetric.WithFloatCounterValueDelta(dp.DoubleVal() - oldCount.DoubleVal()) } else { return nil, fmt.Errorf("%s value type %s not supported", name, metricValueTypeToString(dp.ValueType())) @@ -135,13 +136,13 @@ func convertTotalCounterToDelta(name, prefix string, dims dimensions.NormalizedD return dm, err } -func metricValueTypeToString(t pdata.MetricValueType) string { +func metricValueTypeToString(t pmetric.MetricValueType) string { switch t { - case pdata.MetricValueTypeDouble: + case pmetric.MetricValueTypeDouble: return "MetricValueTypeDouble" - case pdata.MetricValueTypeInt: + case pmetric.MetricValueTypeInt: return "MetricValueTypeInt" - case pdata.MetricValueTypeNone: + case pmetric.MetricValueTypeNone: return "MetricValueTypeNone" default: return "MetricValueTypeUnknown" diff --git a/exporter/dynatraceexporter/internal/serialization/sum_test.go b/exporter/dynatraceexporter/internal/serialization/sum_test.go index eb404e752b16..6d754ae4031d 100644 --- a/exporter/dynatraceexporter/internal/serialization/sum_test.go +++ b/exporter/dynatraceexporter/internal/serialization/sum_test.go @@ -20,114 +20,115 @@ import ( "github.com/dynatrace-oss/dynatrace-metric-utils-go/metric/dimensions" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/ttlmap" ) func Test_serializeSum(t *testing.T) { t.Run("without timestamp", func(t *testing.T) { - dp := pdata.NewNumberDataPoint() + dp := pmetric.NewNumberDataPoint() dp.SetIntVal(5) - got, err := serializeSum("int_sum", "prefix", dimensions.NewNormalizedDimensionList(), pdata.MetricAggregationTemporalityDelta, dp, ttlmap.New(1, 1)) + got, err := serializeSum("int_sum", "prefix", dimensions.NewNormalizedDimensionList(), pmetric.MetricAggregationTemporalityDelta, dp, ttlmap.New(1, 1)) assert.NoError(t, err) assert.Equal(t, "prefix.int_sum count,delta=5", got) }) t.Run("float delta with prefix and dimension", func(t *testing.T) { - dp := pdata.NewNumberDataPoint() + dp := pmetric.NewNumberDataPoint() dp.SetDoubleVal(5.5) - dp.SetTimestamp(pdata.Timestamp(time.Date(2021, 07, 16, 12, 30, 0, 0, time.UTC).UnixNano())) + 
dp.SetTimestamp(pcommon.Timestamp(time.Date(2021, 07, 16, 12, 30, 0, 0, time.UTC).UnixNano())) - got, err := serializeSum("int_sum", "prefix", dimensions.NewNormalizedDimensionList(dimensions.NewDimension("key", "value")), pdata.MetricAggregationTemporalityDelta, dp, ttlmap.New(1, 1)) + got, err := serializeSum("int_sum", "prefix", dimensions.NewNormalizedDimensionList(dimensions.NewDimension("key", "value")), pmetric.MetricAggregationTemporalityDelta, dp, ttlmap.New(1, 1)) assert.NoError(t, err) assert.Equal(t, "prefix.int_sum,key=value count,delta=5 1626438600000", got) }) t.Run("float cumulative with prefix and dimension", func(t *testing.T) { - dp := pdata.NewNumberDataPoint() + dp := pmetric.NewNumberDataPoint() dp.SetDoubleVal(5.5) - dp.SetTimestamp(pdata.Timestamp(time.Date(2021, 07, 16, 12, 30, 0, 0, time.UTC).UnixNano())) + dp.SetTimestamp(pcommon.Timestamp(time.Date(2021, 07, 16, 12, 30, 0, 0, time.UTC).UnixNano())) - dp2 := pdata.NewNumberDataPoint() + dp2 := pmetric.NewNumberDataPoint() dp2.SetDoubleVal(7.0) - dp2.SetTimestamp(pdata.Timestamp(time.Date(2021, 07, 16, 12, 31, 0, 0, time.UTC).UnixNano())) + dp2.SetTimestamp(pcommon.Timestamp(time.Date(2021, 07, 16, 12, 31, 0, 0, time.UTC).UnixNano())) prev := ttlmap.New(1, 1) - got, err := serializeSum("double_sum", "prefix", dimensions.NewNormalizedDimensionList(dimensions.NewDimension("key", "value")), pdata.MetricAggregationTemporalityCumulative, dp, prev) + got, err := serializeSum("double_sum", "prefix", dimensions.NewNormalizedDimensionList(dimensions.NewDimension("key", "value")), pmetric.MetricAggregationTemporalityCumulative, dp, prev) assert.NoError(t, err) assert.Equal(t, "", got) - got, err = serializeSum("double_sum", "prefix", dimensions.NewNormalizedDimensionList(dimensions.NewDimension("key", "value")), pdata.MetricAggregationTemporalityCumulative, dp2, prev) + got, err = serializeSum("double_sum", "prefix", dimensions.NewNormalizedDimensionList(dimensions.NewDimension("key", "value")), pmetric.MetricAggregationTemporalityCumulative, dp2, prev) assert.NoError(t, err) assert.Equal(t, "prefix.double_sum,key=value count,delta=1.5 1626438660000", got) }) t.Run("int cumulative with prefix and dimension", func(t *testing.T) { - dp := pdata.NewNumberDataPoint() + dp := pmetric.NewNumberDataPoint() dp.SetIntVal(5) - dp.SetTimestamp(pdata.Timestamp(time.Date(2021, 07, 16, 12, 30, 0, 0, time.UTC).UnixNano())) + dp.SetTimestamp(pcommon.Timestamp(time.Date(2021, 07, 16, 12, 30, 0, 0, time.UTC).UnixNano())) - dp2 := pdata.NewNumberDataPoint() + dp2 := pmetric.NewNumberDataPoint() dp2.SetIntVal(10) - dp2.SetTimestamp(pdata.Timestamp(time.Date(2021, 07, 16, 12, 31, 0, 0, time.UTC).UnixNano())) + dp2.SetTimestamp(pcommon.Timestamp(time.Date(2021, 07, 16, 12, 31, 0, 0, time.UTC).UnixNano())) prev := ttlmap.New(1, 1) - got, err := serializeSum("int_sum", "prefix", dimensions.NewNormalizedDimensionList(dimensions.NewDimension("key", "value")), pdata.MetricAggregationTemporalityCumulative, dp, prev) + got, err := serializeSum("int_sum", "prefix", dimensions.NewNormalizedDimensionList(dimensions.NewDimension("key", "value")), pmetric.MetricAggregationTemporalityCumulative, dp, prev) assert.NoError(t, err) assert.Equal(t, "", got) - got, err = serializeSum("int_sum", "prefix", dimensions.NewNormalizedDimensionList(dimensions.NewDimension("key", "value")), pdata.MetricAggregationTemporalityCumulative, dp2, prev) + got, err = serializeSum("int_sum", "prefix", dimensions.NewNormalizedDimensionList(dimensions.NewDimension("key", "value")), 
pmetric.MetricAggregationTemporalityCumulative, dp2, prev) assert.NoError(t, err) assert.Equal(t, "prefix.int_sum,key=value count,delta=5 1626438660000", got) }) t.Run("different dimensions should be treated as separate counters", func(t *testing.T) { - dp := pdata.NewNumberDataPoint() + dp := pmetric.NewNumberDataPoint() dp.SetIntVal(5) - dp.Attributes().Insert("sort", pdata.NewValueString("unstable")) - dp.Attributes().Insert("group", pdata.NewValueString("a")) - dp.SetTimestamp(pdata.Timestamp(time.Date(2021, 07, 16, 12, 30, 0, 0, time.UTC).UnixNano())) + dp.Attributes().Insert("sort", pcommon.NewValueString("unstable")) + dp.Attributes().Insert("group", pcommon.NewValueString("a")) + dp.SetTimestamp(pcommon.Timestamp(time.Date(2021, 07, 16, 12, 30, 0, 0, time.UTC).UnixNano())) - dp2 := pdata.NewNumberDataPoint() + dp2 := pmetric.NewNumberDataPoint() dp2.SetIntVal(10) - dp2.Attributes().Insert("sort", pdata.NewValueString("unstable")) - dp2.Attributes().Insert("group", pdata.NewValueString("b")) - dp2.SetTimestamp(pdata.Timestamp(time.Date(2021, 07, 16, 12, 30, 0, 0, time.UTC).UnixNano())) + dp2.Attributes().Insert("sort", pcommon.NewValueString("unstable")) + dp2.Attributes().Insert("group", pcommon.NewValueString("b")) + dp2.SetTimestamp(pcommon.Timestamp(time.Date(2021, 07, 16, 12, 30, 0, 0, time.UTC).UnixNano())) - dp3 := pdata.NewNumberDataPoint() + dp3 := pmetric.NewNumberDataPoint() dp3.SetIntVal(10) - dp3.Attributes().Insert("group", pdata.NewValueString("a")) - dp3.Attributes().Insert("sort", pdata.NewValueString("unstable")) - dp3.SetTimestamp(pdata.Timestamp(time.Date(2021, 07, 16, 12, 30, 0, 0, time.UTC).UnixNano())) + dp3.Attributes().Insert("group", pcommon.NewValueString("a")) + dp3.Attributes().Insert("sort", pcommon.NewValueString("unstable")) + dp3.SetTimestamp(pcommon.Timestamp(time.Date(2021, 07, 16, 12, 30, 0, 0, time.UTC).UnixNano())) - dp4 := pdata.NewNumberDataPoint() + dp4 := pmetric.NewNumberDataPoint() dp4.SetIntVal(20) - dp4.Attributes().Insert("group", pdata.NewValueString("b")) - dp4.Attributes().Insert("sort", pdata.NewValueString("unstable")) - dp4.SetTimestamp(pdata.Timestamp(time.Date(2021, 07, 16, 12, 30, 0, 0, time.UTC).UnixNano())) + dp4.Attributes().Insert("group", pcommon.NewValueString("b")) + dp4.Attributes().Insert("sort", pcommon.NewValueString("unstable")) + dp4.SetTimestamp(pcommon.Timestamp(time.Date(2021, 07, 16, 12, 30, 0, 0, time.UTC).UnixNano())) prev := ttlmap.New(1, 1) - got, err := serializeSum("int_sum", "prefix", dimensions.NewNormalizedDimensionList(dimensions.NewDimension("key", "a")), pdata.MetricAggregationTemporalityCumulative, dp, prev) - got2, err2 := serializeSum("int_sum", "prefix", dimensions.NewNormalizedDimensionList(dimensions.NewDimension("key", "b")), pdata.MetricAggregationTemporalityCumulative, dp2, prev) - got3, err3 := serializeSum("int_sum", "prefix", dimensions.NewNormalizedDimensionList(dimensions.NewDimension("key", "a")), pdata.MetricAggregationTemporalityCumulative, dp3, prev) - got4, err4 := serializeSum("int_sum", "prefix", dimensions.NewNormalizedDimensionList(dimensions.NewDimension("key", "b")), pdata.MetricAggregationTemporalityCumulative, dp4, prev) + got, err := serializeSum("int_sum", "prefix", dimensions.NewNormalizedDimensionList(dimensions.NewDimension("key", "a")), pmetric.MetricAggregationTemporalityCumulative, dp, prev) + got2, err2 := serializeSum("int_sum", "prefix", dimensions.NewNormalizedDimensionList(dimensions.NewDimension("key", "b")), pmetric.MetricAggregationTemporalityCumulative, 
dp2, prev) + got3, err3 := serializeSum("int_sum", "prefix", dimensions.NewNormalizedDimensionList(dimensions.NewDimension("key", "a")), pmetric.MetricAggregationTemporalityCumulative, dp3, prev) + got4, err4 := serializeSum("int_sum", "prefix", dimensions.NewNormalizedDimensionList(dimensions.NewDimension("key", "b")), pmetric.MetricAggregationTemporalityCumulative, dp4, prev) assert.NoError(t, err) assert.NoError(t, err2) @@ -140,23 +141,23 @@ func Test_serializeSum(t *testing.T) { }) t.Run("count values older than the previous count value are dropped", func(t *testing.T) { - dp := pdata.NewNumberDataPoint() + dp := pmetric.NewNumberDataPoint() dp.SetIntVal(5) - dp.SetTimestamp(pdata.Timestamp(time.Date(2021, 07, 16, 12, 30, 0, 0, time.UTC).UnixNano())) + dp.SetTimestamp(pcommon.Timestamp(time.Date(2021, 07, 16, 12, 30, 0, 0, time.UTC).UnixNano())) - dp2 := pdata.NewNumberDataPoint() + dp2 := pmetric.NewNumberDataPoint() dp2.SetIntVal(5) - dp2.SetTimestamp(pdata.Timestamp(time.Date(2021, 07, 16, 12, 29, 0, 0, time.UTC).UnixNano())) + dp2.SetTimestamp(pcommon.Timestamp(time.Date(2021, 07, 16, 12, 29, 0, 0, time.UTC).UnixNano())) prev := ttlmap.New(1, 1) - got, err := serializeSum("int_sum", "prefix", dimensions.NewNormalizedDimensionList(dimensions.NewDimension("key", "value")), pdata.MetricAggregationTemporalityCumulative, dp, prev) + got, err := serializeSum("int_sum", "prefix", dimensions.NewNormalizedDimensionList(dimensions.NewDimension("key", "value")), pmetric.MetricAggregationTemporalityCumulative, dp, prev) assert.NoError(t, err) assert.Equal(t, "", got) assert.Equal(t, dp, prev.Get("int_sum")) - got, err = serializeSum("int_sum", "prefix", dimensions.NewNormalizedDimensionList(dimensions.NewDimension("key", "value")), pdata.MetricAggregationTemporalityCumulative, dp2, prev) + got, err = serializeSum("int_sum", "prefix", dimensions.NewNormalizedDimensionList(dimensions.NewDimension("key", "value")), pmetric.MetricAggregationTemporalityCumulative, dp2, prev) assert.NoError(t, err) assert.Equal(t, "", got) diff --git a/exporter/dynatraceexporter/metrics_exporter.go b/exporter/dynatraceexporter/metrics_exporter.go index faee9fde4725..175bb008739e 100644 --- a/exporter/dynatraceexporter/metrics_exporter.go +++ b/exporter/dynatraceexporter/metrics_exporter.go @@ -28,7 +28,7 @@ import ( "github.com/dynatrace-oss/dynatrace-metric-utils-go/metric/dimensions" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer/consumererror" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/dynatraceexporter/config" @@ -92,7 +92,7 @@ func dimensionsFromTags(tags []string) dimensions.NormalizedDimensionList { return dimensions.NewNormalizedDimensionList(dims...) 
} -func (e *exporter) PushMetricsData(ctx context.Context, md pdata.Metrics) error { +func (e *exporter) PushMetricsData(ctx context.Context, md pmetric.Metrics) error { if e.isDisabled { return nil } @@ -118,7 +118,7 @@ func (e *exporter) PushMetricsData(ctx context.Context, md pdata.Metrics) error return nil } -func (e *exporter) serializeMetrics(md pdata.Metrics) []string { +func (e *exporter) serializeMetrics(md pmetric.Metrics) []string { lines := make([]string, 0) resourceMetrics := md.ResourceMetrics() diff --git a/exporter/dynatraceexporter/metrics_exporter_test.go b/exporter/dynatraceexporter/metrics_exporter_test.go index c2e3ac405065..afd750d04beb 100644 --- a/exporter/dynatraceexporter/metrics_exporter_test.go +++ b/exporter/dynatraceexporter/metrics_exporter_test.go @@ -31,13 +31,14 @@ import ( "go.opentelemetry.io/collector/config/confighttp" "go.opentelemetry.io/collector/config/configtls" "go.opentelemetry.io/collector/consumer/consumererror" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/dynatraceexporter/config" ) -var testTimestamp = pdata.Timestamp(time.Date(2021, 07, 16, 12, 30, 0, 0, time.UTC).UnixNano()) +var testTimestamp = pcommon.Timestamp(time.Date(2021, 07, 16, 12, 30, 0, 0, time.UTC).UnixNano()) func Test_exporter_PushMetricsData(t *testing.T) { sent := "not sent" @@ -54,7 +55,7 @@ func Test_exporter_PushMetricsData(t *testing.T) { })) defer ts.Close() - md := pdata.NewMetrics() + md := pmetric.NewMetrics() md.ResourceMetrics().EnsureCapacity(2) rm := md.ResourceMetrics().AppendEmpty() @@ -71,7 +72,7 @@ func Test_exporter_PushMetricsData(t *testing.T) { noneMetric.SetName("none") intGaugeMetric := metrics.AppendEmpty() - intGaugeMetric.SetDataType(pdata.MetricDataTypeGauge) + intGaugeMetric.SetDataType(pmetric.MetricDataTypeGauge) intGaugeMetric.SetName("int_gauge") intGauge := intGaugeMetric.Gauge() intGaugeDataPoints := intGauge.DataPoints() @@ -80,7 +81,7 @@ func Test_exporter_PushMetricsData(t *testing.T) { intGaugeDataPoint.SetTimestamp(testTimestamp) intSumMetric := metrics.AppendEmpty() - intSumMetric.SetDataType(pdata.MetricDataTypeSum) + intSumMetric.SetDataType(pmetric.MetricDataTypeSum) intSumMetric.SetName("int_sum") intSum := intSumMetric.Sum() intSumDataPoints := intSum.DataPoints() @@ -89,7 +90,7 @@ func Test_exporter_PushMetricsData(t *testing.T) { intSumDataPoint.SetTimestamp(testTimestamp) doubleGaugeMetric := metrics.AppendEmpty() - doubleGaugeMetric.SetDataType(pdata.MetricDataTypeGauge) + doubleGaugeMetric.SetDataType(pmetric.MetricDataTypeGauge) doubleGaugeMetric.SetName("double_gauge") doubleGauge := doubleGaugeMetric.Gauge() doubleGaugeDataPoints := doubleGauge.DataPoints() @@ -98,7 +99,7 @@ func Test_exporter_PushMetricsData(t *testing.T) { doubleGaugeDataPoint.SetTimestamp(testTimestamp) doubleSumMetric := metrics.AppendEmpty() - doubleSumMetric.SetDataType(pdata.MetricDataTypeSum) + doubleSumMetric.SetDataType(pmetric.MetricDataTypeSum) doubleSumMetric.SetName("double_sum") doubleSum := doubleSumMetric.Sum() doubleSumDataPoints := doubleSum.DataPoints() @@ -107,7 +108,7 @@ func Test_exporter_PushMetricsData(t *testing.T) { doubleSumDataPoint.SetTimestamp(testTimestamp) doubleHistogramMetric := metrics.AppendEmpty() - doubleHistogramMetric.SetDataType(pdata.MetricDataTypeHistogram) + doubleHistogramMetric.SetDataType(pmetric.MetricDataTypeHistogram) 
doubleHistogramMetric.SetName("double_histogram") doubleHistogram := doubleHistogramMetric.Histogram() doubleHistogramDataPoints := doubleHistogram.DataPoints() @@ -117,7 +118,7 @@ func Test_exporter_PushMetricsData(t *testing.T) { doubleHistogramDataPoint.SetExplicitBounds([]float64{0, 2, 4, 8}) doubleHistogramDataPoint.SetBucketCounts([]uint64{0, 1, 0, 1, 0}) doubleHistogramDataPoint.SetTimestamp(testTimestamp) - doubleHistogram.SetAggregationTemporality(pdata.MetricAggregationTemporalityDelta) + doubleHistogram.SetAggregationTemporality(pmetric.MetricAggregationTemporalityDelta) type fields struct { settings component.TelemetrySettings @@ -126,7 +127,7 @@ func Test_exporter_PushMetricsData(t *testing.T) { } type args struct { ctx context.Context - md pdata.Metrics + md pmetric.Metrics } test := struct { name string @@ -176,7 +177,7 @@ func Test_exporter_PushMetricsData_EmptyPayload(t *testing.T) { })) defer ts.Close() - md := pdata.NewMetrics() + md := pmetric.NewMetrics() md.ResourceMetrics().EnsureCapacity(2) rm := md.ResourceMetrics().AppendEmpty() @@ -208,7 +209,7 @@ func Test_exporter_PushMetricsData_isDisabled(t *testing.T) { })) defer ts.Close() - md := pdata.NewMetrics() + md := pmetric.NewMetrics() md.ResourceMetrics().EnsureCapacity(2) rm := md.ResourceMetrics().AppendEmpty() @@ -218,7 +219,7 @@ func Test_exporter_PushMetricsData_isDisabled(t *testing.T) { metrics := ilm.Metrics() metric := metrics.AppendEmpty() - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) metric.SetName("int_gauge") intGauge := metric.Gauge() intGaugeDataPoints := intGauge.DataPoints() @@ -396,7 +397,7 @@ func Test_exporter_PushMetricsData_Error(t *testing.T) { })) ts.Close() - md := pdata.NewMetrics() + md := pmetric.NewMetrics() md.ResourceMetrics().EnsureCapacity(2) rm := md.ResourceMetrics().AppendEmpty() @@ -406,7 +407,7 @@ func Test_exporter_PushMetricsData_Error(t *testing.T) { metrics := ilm.Metrics() intGaugeMetric := metrics.AppendEmpty() - intGaugeMetric.SetDataType(pdata.MetricDataTypeGauge) + intGaugeMetric.SetDataType(pmetric.MetricDataTypeGauge) intGaugeMetric.SetName("int_gauge") intGauge := intGaugeMetric.Gauge() intGaugeDataPoints := intGauge.DataPoints() @@ -421,7 +422,7 @@ func Test_exporter_PushMetricsData_Error(t *testing.T) { } type args struct { ctx context.Context - md pdata.Metrics + md pmetric.Metrics } test := struct { name string diff --git a/exporter/elasticexporter/config_test.go b/exporter/elasticexporter/config_test.go index 59bac4bdd23b..fbced956128b 100644 --- a/exporter/elasticexporter/config_test.go +++ b/exporter/elasticexporter/config_test.go @@ -26,7 +26,7 @@ import ( "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/config" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/ptrace" "go.opentelemetry.io/collector/service/servicetest" ) @@ -102,7 +102,7 @@ func testAuth(t *testing.T, apiKey, secretToken, expectedAuthorization string) { assert.NoError(t, err) assert.NotNil(t, te, "failed to create trace exporter") - traces := pdata.NewTraces() + traces := ptrace.NewTraces() span := traces.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty().Spans().AppendEmpty() span.SetName("foobar") assert.NoError(t, te.ConsumeTraces(context.Background(), traces)) diff --git a/exporter/elasticexporter/exporter.go b/exporter/elasticexporter/exporter.go index 7c243489dfe9..ba10d87e9af4 100644 --- 
a/exporter/elasticexporter/exporter.go +++ b/exporter/elasticexporter/exporter.go @@ -29,7 +29,8 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/exporter/exporterhelper" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/multierr" "go.uber.org/zap" @@ -44,7 +45,7 @@ func newElasticTracesExporter( if err != nil { return nil, fmt.Errorf("cannot configure Elastic APM trace exporter: %v", err) } - return exporterhelper.NewTracesExporter(cfg, set, func(ctx context.Context, traces pdata.Traces) error { + return exporterhelper.NewTracesExporter(cfg, set, func(ctx context.Context, traces ptrace.Traces) error { var errs error resourceSpansSlice := traces.ResourceSpans() for i := 0; i < resourceSpansSlice.Len(); i++ { @@ -64,7 +65,7 @@ func newElasticMetricsExporter( if err != nil { return nil, fmt.Errorf("cannot configure Elastic APM metrics exporter: %v", err) } - return exporterhelper.NewMetricsExporter(cfg, set, func(ctx context.Context, input pdata.Metrics) error { + return exporterhelper.NewMetricsExporter(cfg, set, func(ctx context.Context, input pmetric.Metrics) error { var errs error resourceMetricsSlice := input.ResourceMetrics() for i := 0; i < resourceMetricsSlice.Len(); i++ { @@ -122,7 +123,7 @@ func newTransport(config *Config) (transport.Transport, error) { // ExportResourceSpans exports OTLP trace data to Elastic APM Server, // returning the number of spans that were dropped along with any errors. -func (e *elasticExporter) ExportResourceSpans(ctx context.Context, rs pdata.ResourceSpans) (int, error) { +func (e *elasticExporter) ExportResourceSpans(ctx context.Context, rs ptrace.ResourceSpans) (int, error) { var w fastjson.Writer elastic.EncodeResourceMetadata(rs.Resource(), &w) var errs []error @@ -150,7 +151,7 @@ func (e *elasticExporter) ExportResourceSpans(ctx context.Context, rs pdata.Reso // ExportResourceMetrics exports OTLP metrics to Elastic APM Server, // returning the number of metrics that were dropped along with any errors. 
-func (e *elasticExporter) ExportResourceMetrics(ctx context.Context, rm pdata.ResourceMetrics) (int, error) { +func (e *elasticExporter) ExportResourceMetrics(ctx context.Context, rm pmetric.ResourceMetrics) (int, error) { var w fastjson.Writer elastic.EncodeResourceMetadata(rm.Resource(), &w) var errs error diff --git a/exporter/elasticexporter/exporter_test.go b/exporter/elasticexporter/exporter_test.go index 8df90a073d61..7ed6db6679a5 100644 --- a/exporter/elasticexporter/exporter_test.go +++ b/exporter/elasticexporter/exporter_test.go @@ -27,8 +27,9 @@ import ( "github.com/stretchr/testify/require" "go.elastic.co/apm/transport/transporttest" "go.opentelemetry.io/collector/component/componenttest" - "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/obsreport/obsreporttest" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" ) func TestTracesExporter(t *testing.T) { @@ -43,7 +44,7 @@ func TestTracesExporter(t *testing.T) { assert.NoError(t, err) assert.NotNil(t, te, "failed to create trace exporter") - traces := pdata.NewTraces() + traces := ptrace.NewTraces() resourceSpans := traces.ResourceSpans() span := resourceSpans.AppendEmpty().ScopeSpans().AppendEmpty().Spans().AppendEmpty() span.SetName("foobar") @@ -104,14 +105,14 @@ func TestMetricsExporterSendError(t *testing.T) { assert.NoError(t, me.Shutdown(context.Background())) } -func sampleMetrics() pdata.Metrics { - metrics := pdata.NewMetrics() +func sampleMetrics() pmetric.Metrics { + metrics := pmetric.NewMetrics() resourceMetrics := metrics.ResourceMetrics() resourceMetrics.EnsureCapacity(2) for i := 0; i < 2; i++ { metric := resourceMetrics.AppendEmpty().ScopeMetrics().AppendEmpty().Metrics().AppendEmpty() metric.SetName("foobar") - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) metric.Gauge().DataPoints().AppendEmpty().SetDoubleVal(123) } return metrics diff --git a/exporter/elasticexporter/go.mod b/exporter/elasticexporter/go.mod index 6be22a7c3675..cfe836c2f4a3 100644 --- a/exporter/elasticexporter/go.mod +++ b/exporter/elasticexporter/go.mod @@ -6,15 +6,16 @@ require ( github.com/stretchr/testify v1.7.1 go.elastic.co/apm v1.15.0 go.elastic.co/fastjson v1.1.0 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/multierr v1.8.0 go.uber.org/zap v1.21.0 ) require ( github.com/armon/go-radix v1.0.0 // indirect - github.com/cenkalti/backoff/v4 v4.1.2 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/elastic/go-licenser v0.3.1 // indirect github.com/elastic/go-sysinfo v1.1.1 // indirect @@ -23,11 +24,10 @@ require ( github.com/go-logr/logr v1.2.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/protobuf v1.5.2 // indirect github.com/google/go-cmp v0.5.7 // indirect github.com/jcchavezs/porto v0.1.0 // indirect github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect 
github.com/kr/pretty v0.3.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect @@ -37,25 +37,21 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/procfs v0.7.3 // indirect github.com/santhosh-tekuri/jsonschema v1.2.4 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect - go.opentelemetry.io/otel/sdk v1.6.1 // indirect + go.opentelemetry.io/otel/sdk v1.6.3 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect golang.org/x/mod v0.5.1 // indirect - golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect - golang.org/x/text v0.3.7 // indirect + golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 // indirect golang.org/x/tools v0.1.9 // indirect golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect - google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect - google.golang.org/grpc v1.45.0 // indirect - google.golang.org/protobuf v1.28.0 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect howett.net/plist v0.0.0-20181124034731-591f970eefbb // indirect ) + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/exporter/elasticexporter/go.sum b/exporter/elasticexporter/go.sum index b501335e3fd6..fa7b901e2608 100644 --- a/exporter/elasticexporter/go.sum +++ b/exporter/elasticexporter/go.sum @@ -1,8 +1,5 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= @@ -20,19 +17,11 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= -github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= 
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -46,9 +35,6 @@ github.com/elastic/go-windows v1.0.0/go.mod h1:TsU0Nrp7/y3+VwE82FoZF8gC/XFg/Elz6 github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= @@ -56,7 +42,6 @@ github.com/felixge/httpsnoop v1.0.2 h1:+nS9g82KMXccJ/wp0zyRW9ZBHFETmMGtkk+2CTTrW github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= @@ -73,18 +58,14 @@ 
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -94,13 +75,10 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -135,8 +113,8 @@ github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqx github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod 
h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= @@ -182,7 +160,6 @@ github.com/prometheus/procfs v0.0.0-20190425082905-87a4384529e0/go.mod h1:TjEm7z github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/rhnvrm/simples3 v0.6.1/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= @@ -190,15 +167,11 @@ github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFo github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/santhosh-tekuri/jsonschema v1.2.4 h1:hNhW8e7t+H1vgY+1QeEQpveR6D4+OwKPXCfD2aieJis= github.com/santhosh-tekuri/jsonschema v1.2.4/go.mod h1:TEAUOeZSmIxTTuHatJzrvARHiuO9LYd+cIxzgEHCQI4= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= @@ -212,23 +185,22 @@ go.elastic.co/fastjson v1.1.0 h1:3MrGBWWVIxe/xvsbpghtkFoPciPhOCmjsR/HfwEeQR4= go.elastic.co/fastjson v1.1.0/go.mod h1:boNGISWMjQsUPy/t6yqt2/1Wx4YNPSe+mZjlyw9vKKI= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= 
-go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= -go.opentelemetry.io/otel v1.6.1/go.mod h1:blzUabWHkX6LJewxvadmzafgh/wnvBSDBdOuwkAtrWQ= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= -go.opentelemetry.io/otel/sdk v1.6.1/go.mod h1:IVYrddmFZ+eJqu2k38qD3WezFR2pymCzm8tdxyh3R4E= -go.opentelemetry.io/otel/trace v1.6.1/go.mod h1:RkFRM1m0puWIq10oxImnGEduNBzxiN7TXluRBtE+5j0= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= +go.opentelemetry.io/otel/sdk v1.6.3/go.mod h1:A4iWF7HTXa+GWL/AaqESz28VuSBIcZ+0CV+IzJ5NMiQ= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -258,20 +230,16 @@ golang.org/x/mod v0.5.1 h1:OJxoQ/rynoF0dcCdI7cLPktw/hR2cueqYfjm43oqK38= golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f h1:OfiFi4JbukWwe3lzw+xunroH1mnC1e2Gy5cxNJApiSY= -golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -290,7 +258,6 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191025021431-6c3a3bfe00ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -299,15 +266,13 @@ golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -332,22 +297,16 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -357,11 +316,7 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0 
h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -370,8 +325,6 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EV gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= diff --git a/exporter/elasticexporter/internal/translator/elastic/exceptions_test.go b/exporter/elasticexporter/internal/translator/elastic/exceptions_test.go index 177928ea6b8f..e25da4e62efe 100644 --- a/exporter/elasticexporter/internal/translator/elastic/exceptions_test.go +++ b/exporter/elasticexporter/internal/translator/elastic/exceptions_test.go @@ -23,17 +23,18 @@ import ( "go.elastic.co/apm/model" "go.elastic.co/apm/transport/transporttest" "go.elastic.co/fastjson" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/elasticexporter/internal/translator/elastic" ) func TestEncodeSpanEventsNonExceptions(t *testing.T) { - nonExceptionEvent := pdata.NewSpanEvent() + nonExceptionEvent := ptrace.NewSpanEvent() nonExceptionEvent.SetName("not_exception") - incompleteExceptionEvent := pdata.NewSpanEvent() + incompleteExceptionEvent := ptrace.NewSpanEvent() incompleteExceptionEvent.SetName("exception") // At least one of exception.message and exception.type is required. 
incompleteExceptionEvent.Attributes().InsertString(conventions.AttributeExceptionStacktrace, "stacktrace") @@ -45,8 +46,8 @@ func TestEncodeSpanEventsNonExceptions(t *testing.T) { func TestEncodeSpanEventsJavaExceptions(t *testing.T) { timestamp := time.Unix(123, 0).UTC() - exceptionEvent1 := pdata.NewSpanEvent() - exceptionEvent1.SetTimestamp(pdata.NewTimestampFromTime(timestamp)) + exceptionEvent1 := ptrace.NewSpanEvent() + exceptionEvent1.SetTimestamp(pcommon.NewTimestampFromTime(timestamp)) exceptionEvent1.SetName("exception") exceptionEvent1.Attributes().InsertString("exception.type", "java.net.ConnectException.OSError") exceptionEvent1.Attributes().InsertString("exception.message", "Division by zero") @@ -60,8 +61,8 @@ func TestEncodeSpanEventsJavaExceptions(t *testing.T) { at com.foo.loader//com.foo.bar.App.run(App.java:12) at java.base/java.lang.Thread.run(Unknown Source) `[1:]) - exceptionEvent2 := pdata.NewSpanEvent() - exceptionEvent2.SetTimestamp(pdata.NewTimestampFromTime(timestamp)) + exceptionEvent2 := ptrace.NewSpanEvent() + exceptionEvent2.SetTimestamp(pcommon.NewTimestampFromTime(timestamp)) exceptionEvent2.SetName("exception") exceptionEvent2.Attributes().InsertString("exception.type", "HighLevelException") exceptionEvent2.Attributes().InsertString("exception.message", "MidLevelException: LowLevelException") @@ -235,9 +236,9 @@ Caused by: whatever at the movies`, } - var events []pdata.SpanEvent + var events []ptrace.SpanEvent for _, stacktrace := range stacktraces { - event := pdata.NewSpanEvent() + event := ptrace.NewSpanEvent() event.SetName("exception") event.Attributes().InsertString("exception.type", "ExceptionType") event.Attributes().InsertString("exception.stacktrace", stacktrace) @@ -256,8 +257,8 @@ Caused by: whatever func TestEncodeSpanEventsNonJavaExceptions(t *testing.T) { timestamp := time.Unix(123, 0).UTC() - exceptionEvent := pdata.NewSpanEvent() - exceptionEvent.SetTimestamp(pdata.NewTimestampFromTime(timestamp)) + exceptionEvent := ptrace.NewSpanEvent() + exceptionEvent.SetTimestamp(pcommon.NewTimestampFromTime(timestamp)) exceptionEvent.SetName("exception") exceptionEvent.Attributes().InsertString("exception.type", "the_type") exceptionEvent.Attributes().InsertString("exception.message", "the_message") @@ -283,13 +284,13 @@ func TestEncodeSpanEventsNonJavaExceptions(t *testing.T) { }, errors[0]) } -func encodeSpanEvents(t *testing.T, language string, events ...pdata.SpanEvent) (model.Transaction, []model.Error) { +func encodeSpanEvents(t *testing.T, language string, events ...ptrace.SpanEvent) (model.Transaction, []model.Error) { traceID := model.TraceID{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} transactionID := model.SpanID{1, 1, 1, 1, 1, 1, 1, 1} - span := pdata.NewSpan() - span.SetTraceID(pdata.NewTraceID(traceID)) - span.SetSpanID(pdata.NewSpanID(transactionID)) + span := ptrace.NewSpan() + span.SetTraceID(pcommon.NewTraceID(traceID)) + span.SetSpanID(pcommon.NewSpanID(transactionID)) for _, event := range events { tgt := span.Events().AppendEmpty() event.CopyTo(tgt) @@ -297,10 +298,10 @@ func encodeSpanEvents(t *testing.T, language string, events ...pdata.SpanEvent) var w fastjson.Writer var recorder transporttest.RecorderTransport - resource := pdata.NewResource() + resource := pcommon.NewResource() resource.Attributes().InsertString(conventions.AttributeTelemetrySDKLanguage, language) elastic.EncodeResourceMetadata(resource, &w) - err := elastic.EncodeSpan(span, pdata.NewInstrumentationScope(), resource, &w) + err := 
elastic.EncodeSpan(span, pcommon.NewInstrumentationScope(), resource, &w) assert.NoError(t, err) sendStream(t, &w, &recorder) diff --git a/exporter/elasticexporter/internal/translator/elastic/metadata.go b/exporter/elasticexporter/internal/translator/elastic/metadata.go index de49e2e2775c..06f14f70a21a 100644 --- a/exporter/elasticexporter/internal/translator/elastic/metadata.go +++ b/exporter/elasticexporter/internal/translator/elastic/metadata.go @@ -20,12 +20,12 @@ import ( "go.elastic.co/apm/model" "go.elastic.co/fastjson" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" ) // EncodeResourceMetadata encodes a metadata line from resource, writing to w. -func EncodeResourceMetadata(resource pdata.Resource, w *fastjson.Writer) { +func EncodeResourceMetadata(resource pcommon.Resource, w *fastjson.Writer) { var agent model.Agent var service model.Service var serviceNode model.ServiceNode @@ -35,7 +35,7 @@ func EncodeResourceMetadata(resource pdata.Resource, w *fastjson.Writer) { var k8sPod model.KubernetesPod var labels model.IfaceMap - resource.Attributes().Range(func(k string, v pdata.Value) bool { + resource.Attributes().Range(func(k string, v pcommon.Value) bool { switch k { case conventions.AttributeServiceName: service.Name = cleanServiceName(v.StringVal()) diff --git a/exporter/elasticexporter/internal/translator/elastic/metadata_test.go b/exporter/elasticexporter/internal/translator/elastic/metadata_test.go index 3b553508199f..efe9b1cecc0d 100644 --- a/exporter/elasticexporter/internal/translator/elastic/metadata_test.go +++ b/exporter/elasticexporter/internal/translator/elastic/metadata_test.go @@ -21,14 +21,14 @@ import ( "go.elastic.co/apm/model" "go.elastic.co/apm/transport/transporttest" "go.elastic.co/fastjson" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/elasticexporter/internal/translator/elastic" ) func TestMetadataDefaults(t *testing.T) { - out := metadataWithResource(t, pdata.NewResource()) + out := metadataWithResource(t, pcommon.NewResource()) assert.Equal(t, metadata{ service: model.Service{ Name: "unknown", @@ -137,13 +137,13 @@ func TestMetadataKubernetes(t *testing.T) { }, out.system.Kubernetes) } -func resourceFromAttributesMap(attrs map[string]interface{}) pdata.Resource { - resource := pdata.NewResource() - pdata.NewMapFromRaw(attrs).CopyTo(resource.Attributes()) +func resourceFromAttributesMap(attrs map[string]interface{}) pcommon.Resource { + resource := pcommon.NewResource() + pcommon.NewMapFromRaw(attrs).CopyTo(resource.Attributes()) return resource } -func metadataWithResource(t *testing.T, resource pdata.Resource) metadata { +func metadataWithResource(t *testing.T, resource pcommon.Resource) metadata { var out metadata var recorder transporttest.RecorderTransport var w fastjson.Writer diff --git a/exporter/elasticexporter/internal/translator/elastic/metrics.go b/exporter/elasticexporter/internal/translator/elastic/metrics.go index 3ba203422ca9..f71cb566fb59 100644 --- a/exporter/elasticexporter/internal/translator/elastic/metrics.go +++ b/exporter/elasticexporter/internal/translator/elastic/metrics.go @@ -23,7 +23,8 @@ import ( "go.elastic.co/apm/model" "go.elastic.co/fastjson" - "go.opentelemetry.io/collector/model/pdata" + 
"go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" ) // EncodeMetrics encodes an OpenTelemetry metrics slice, and instrumentation @@ -31,23 +32,23 @@ import ( // // TODO(axw) otlpLibrary is currently not used. We should consider recording // it as metadata. -func EncodeMetrics(otlpMetrics pdata.MetricSlice, otlpLibrary pdata.InstrumentationScope, w *fastjson.Writer) (dropped int, _ error) { +func EncodeMetrics(otlpMetrics pmetric.MetricSlice, otlpLibrary pcommon.InstrumentationScope, w *fastjson.Writer) (dropped int, _ error) { var metricsets metricsets for i := 0; i < otlpMetrics.Len(); i++ { metric := otlpMetrics.At(i) name := metric.Name() switch metric.DataType() { - case pdata.MetricDataTypeGauge: + case pmetric.MetricDataTypeGauge: doubleGauge := metric.Gauge() dps := doubleGauge.DataPoints() for i := 0; i < dps.Len(); i++ { dp := dps.At(i) var val float64 switch dp.ValueType() { - case pdata.MetricValueTypeDouble: + case pmetric.MetricValueTypeDouble: val = dp.DoubleVal() - case pdata.MetricValueTypeInt: + case pmetric.MetricValueTypeInt: val = float64(dp.IntVal()) } metricsets.upsert(model.Metrics{ @@ -58,16 +59,16 @@ func EncodeMetrics(otlpMetrics pdata.MetricSlice, otlpLibrary pdata.Instrumentat }}, }) } - case pdata.MetricDataTypeSum: + case pmetric.MetricDataTypeSum: doubleSum := metric.Sum() dps := doubleSum.DataPoints() for i := 0; i < dps.Len(); i++ { dp := dps.At(i) var val float64 switch dp.ValueType() { - case pdata.MetricValueTypeDouble: + case pmetric.MetricValueTypeDouble: val = dp.DoubleVal() - case pdata.MetricValueTypeInt: + case pmetric.MetricValueTypeInt: val = float64(dp.IntVal()) } metricsets.upsert(model.Metrics{ @@ -78,7 +79,7 @@ func EncodeMetrics(otlpMetrics pdata.MetricSlice, otlpLibrary pdata.Instrumentat }}, }) } - case pdata.MetricDataTypeHistogram: + case pmetric.MetricDataTypeHistogram: // TODO(axw) requires https://github.com/elastic/apm-server/issues/3195 doubleHistogram := metric.Histogram() dropped += doubleHistogram.DataPoints().Len() @@ -97,14 +98,14 @@ func EncodeMetrics(otlpMetrics pdata.MetricSlice, otlpLibrary pdata.Instrumentat return dropped, nil } -func asTime(in pdata.Timestamp) model.Time { +func asTime(in pcommon.Timestamp) model.Time { return model.Time(time.Unix(0, int64(in))) } -func asStringMap(in pdata.Map) model.StringMap { +func asStringMap(in pcommon.Map) model.StringMap { var out model.StringMap in.Sort() - in.Range(func(k string, v pdata.Value) bool { + in.Range(func(k string, v pcommon.Value) bool { out = append(out, model.StringMapItem{ Key: k, Value: v.AsString(), diff --git a/exporter/elasticexporter/internal/translator/elastic/metrics_test.go b/exporter/elasticexporter/internal/translator/elastic/metrics_test.go index 631489fa2bc1..193d63db388b 100644 --- a/exporter/elasticexporter/internal/translator/elastic/metrics_test.go +++ b/exporter/elasticexporter/internal/translator/elastic/metrics_test.go @@ -23,7 +23,8 @@ import ( "go.elastic.co/apm/model" "go.elastic.co/apm/transport/transporttest" "go.elastic.co/fastjson" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/elasticexporter/internal/translator/elastic" ) @@ -31,11 +32,11 @@ import ( func TestEncodeMetrics(t *testing.T) { var w fastjson.Writer var recorder transporttest.RecorderTransport - elastic.EncodeResourceMetadata(pdata.NewResource(), &w) + 
elastic.EncodeResourceMetadata(pcommon.NewResource(), &w) - scopeMetrics := pdata.NewScopeMetrics() + scopeMetrics := pmetric.NewScopeMetrics() metrics := scopeMetrics.Metrics() - appendMetric := func(name string, dataType pdata.MetricDataType) pdata.Metric { + appendMetric := func(name string, dataType pmetric.MetricDataType) pmetric.Metric { metric := metrics.AppendEmpty() metric.SetName(name) metric.SetDataType(dataType) @@ -47,74 +48,74 @@ func TestEncodeMetrics(t *testing.T) { var expectDropped int - metric := appendMetric("int_gauge_metric", pdata.MetricDataTypeGauge) + metric := appendMetric("int_gauge_metric", pmetric.MetricDataTypeGauge) intGauge := metric.Gauge() intGauge.DataPoints().EnsureCapacity(4) idp := intGauge.DataPoints().AppendEmpty() - idp.SetTimestamp(pdata.NewTimestampFromTime(timestamp0)) + idp.SetTimestamp(pcommon.NewTimestampFromTime(timestamp0)) idp.SetIntVal(1) idp = intGauge.DataPoints().AppendEmpty() - idp.SetTimestamp(pdata.NewTimestampFromTime(timestamp1)) + idp.SetTimestamp(pcommon.NewTimestampFromTime(timestamp1)) idp.SetIntVal(2) idp.Attributes().InsertString("k", "v") idp = intGauge.DataPoints().AppendEmpty() - idp.SetTimestamp(pdata.NewTimestampFromTime(timestamp1)) + idp.SetTimestamp(pcommon.NewTimestampFromTime(timestamp1)) idp.SetIntVal(3) idp = intGauge.DataPoints().AppendEmpty() - idp.SetTimestamp(pdata.NewTimestampFromTime(timestamp1)) + idp.SetTimestamp(pcommon.NewTimestampFromTime(timestamp1)) idp.SetIntVal(4) idp.Attributes().InsertString("k", "v2") - metric = appendMetric("double_gauge_metric", pdata.MetricDataTypeGauge) + metric = appendMetric("double_gauge_metric", pmetric.MetricDataTypeGauge) doubleGauge := metric.Gauge() doubleGauge.DataPoints().EnsureCapacity(4) ddp := doubleGauge.DataPoints().AppendEmpty() - ddp.SetTimestamp(pdata.NewTimestampFromTime(timestamp0)) + ddp.SetTimestamp(pcommon.NewTimestampFromTime(timestamp0)) ddp.SetDoubleVal(5) ddp = doubleGauge.DataPoints().AppendEmpty() - ddp.SetTimestamp(pdata.NewTimestampFromTime(timestamp1)) + ddp.SetTimestamp(pcommon.NewTimestampFromTime(timestamp1)) ddp.SetDoubleVal(6) ddp.Attributes().InsertString("k", "v") ddp = doubleGauge.DataPoints().AppendEmpty() - ddp.SetTimestamp(pdata.NewTimestampFromTime(timestamp1)) + ddp.SetTimestamp(pcommon.NewTimestampFromTime(timestamp1)) ddp.SetDoubleVal(7) ddp = doubleGauge.DataPoints().AppendEmpty() - ddp.SetTimestamp(pdata.NewTimestampFromTime(timestamp1)) + ddp.SetTimestamp(pcommon.NewTimestampFromTime(timestamp1)) ddp.SetDoubleVal(8) ddp.Attributes().InsertString("k", "v2") - metric = appendMetric("int_sum_metric", pdata.MetricDataTypeSum) + metric = appendMetric("int_sum_metric", pmetric.MetricDataTypeSum) intSum := metric.Sum() intSum.DataPoints().EnsureCapacity(3) is := intSum.DataPoints().AppendEmpty() - is.SetTimestamp(pdata.NewTimestampFromTime(timestamp0)) + is.SetTimestamp(pcommon.NewTimestampFromTime(timestamp0)) is.SetIntVal(9) is = intSum.DataPoints().AppendEmpty() - is.SetTimestamp(pdata.NewTimestampFromTime(timestamp1)) + is.SetTimestamp(pcommon.NewTimestampFromTime(timestamp1)) is.SetIntVal(10) is.Attributes().InsertString("k", "v") is = intSum.DataPoints().AppendEmpty() - is.SetTimestamp(pdata.NewTimestampFromTime(timestamp1)) + is.SetTimestamp(pcommon.NewTimestampFromTime(timestamp1)) is.SetIntVal(11) is.Attributes().InsertString("k2", "v") - metric = appendMetric("double_sum_metric", pdata.MetricDataTypeSum) + metric = appendMetric("double_sum_metric", pmetric.MetricDataTypeSum) doubleSum := metric.Sum() 
doubleSum.DataPoints().EnsureCapacity(3) ds := doubleSum.DataPoints().AppendEmpty() - ds.SetTimestamp(pdata.NewTimestampFromTime(timestamp0)) + ds.SetTimestamp(pcommon.NewTimestampFromTime(timestamp0)) ds.SetDoubleVal(12) ds = doubleSum.DataPoints().AppendEmpty() - ds.SetTimestamp(pdata.NewTimestampFromTime(timestamp1)) + ds.SetTimestamp(pcommon.NewTimestampFromTime(timestamp1)) ds.SetDoubleVal(13) ds.Attributes().InsertString("k", "v") ds = doubleSum.DataPoints().AppendEmpty() - ds.SetTimestamp(pdata.NewTimestampFromTime(timestamp1)) + ds.SetTimestamp(pcommon.NewTimestampFromTime(timestamp1)) ds.SetDoubleVal(14) ds.Attributes().InsertString("k2", "v") // Histograms are currently not supported, and will be ignored. - metric = appendMetric("double_histogram_metric", pdata.MetricDataTypeHistogram) + metric = appendMetric("double_histogram_metric", pmetric.MetricDataTypeHistogram) metric.Histogram().DataPoints().AppendEmpty() expectDropped++ diff --git a/exporter/elasticexporter/internal/translator/elastic/traces.go b/exporter/elasticexporter/internal/translator/elastic/traces.go index bfd28eab3d03..1007592414ed 100644 --- a/exporter/elasticexporter/internal/translator/elastic/traces.go +++ b/exporter/elasticexporter/internal/translator/elastic/traces.go @@ -25,8 +25,9 @@ import ( "go.elastic.co/apm/model" "go.elastic.co/fastjson" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" ) // EncodeSpan encodes an OpenTelemetry span, and instrumentation library information, @@ -36,9 +37,9 @@ import ( // // TODO(axw) otlpLibrary is currently not used. We should consider recording it as metadata. func EncodeSpan( - otlpSpan pdata.Span, - otlpLibrary pdata.InstrumentationScope, - otlpResource pdata.Resource, + otlpSpan ptrace.Span, + otlpLibrary pcommon.InstrumentationScope, + otlpResource pcommon.Resource, w *fastjson.Writer, ) error { spanID := model.SpanID(otlpSpan.SpanID().Bytes()) @@ -52,7 +53,7 @@ func EncodeSpan( name := truncate(otlpSpan.Name()) var transactionContext transactionContext - if root || otlpSpan.Kind() == pdata.SpanKindServer { + if root || otlpSpan.Kind() == ptrace.SpanKindServer { transaction := model.Transaction{ ID: spanID, TraceID: traceID, @@ -95,8 +96,8 @@ func EncodeSpan( } func setTransactionProperties( - otlpSpan pdata.Span, - otlpLibrary pdata.InstrumentationScope, + otlpSpan ptrace.Span, + otlpLibrary pcommon.InstrumentationScope, tx *model.Transaction, context *transactionContext, ) error { var ( @@ -106,7 +107,7 @@ func setTransactionProperties( netPeerPort int ) - otlpSpan.Attributes().Range(func(k string, v pdata.Value) bool { + otlpSpan.Attributes().Range(func(k string, v pcommon.Value) bool { var storeTag bool switch k { // http.* @@ -186,9 +187,9 @@ func setTransactionProperties( status := otlpSpan.Status() tx.Outcome = spanStatusOutcome(status) switch status.Code() { - case pdata.StatusCodeOk: + case ptrace.StatusCodeOk: tx.Result = "OK" - case pdata.StatusCodeError: + case ptrace.StatusCodeError: tx.Result = "Error" } @@ -218,7 +219,7 @@ func setTransactionProperties( return nil } -func setSpanProperties(otlpSpan pdata.Span, span *model.Span) error { +func setSpanProperties(otlpSpan ptrace.Span, span *model.Span) error { var ( context spanContext netPeerName string @@ -226,7 +227,7 @@ func setSpanProperties(otlpSpan pdata.Span, span *model.Span) error { netPeerPort int ) - otlpSpan.Attributes().Range(func(k 
string, v pdata.Value) bool { + otlpSpan.Attributes().Range(func(k string, v pcommon.Value) bool { var storeTag bool switch k { // http.* @@ -358,8 +359,8 @@ func setSpanProperties(otlpSpan pdata.Span, span *model.Span) error { } func encodeSpanEvents( - events pdata.SpanEventSlice, - resource pdata.Resource, + events ptrace.SpanEventSlice, + resource pcommon.Resource, traceID model.TraceID, spanID model.SpanID, w *fastjson.Writer, ) error { @@ -381,7 +382,7 @@ func encodeSpanEvents( } var exceptionEscaped bool var exceptionMessage, exceptionStacktrace, exceptionType string - event.Attributes().Range(func(k string, v pdata.Value) bool { + event.Attributes().Range(func(k string, v pcommon.Value) bool { switch k { case conventions.AttributeExceptionMessage: exceptionMessage = v.StringVal() @@ -618,11 +619,11 @@ func schemeDefaultPort(scheme string) int { return 0 } -func spanStatusOutcome(status pdata.SpanStatus) string { +func spanStatusOutcome(status ptrace.SpanStatus) string { switch status.Code() { - case pdata.StatusCodeOk: + case ptrace.StatusCodeOk: return "success" - case pdata.StatusCodeError: + case ptrace.StatusCodeError: return "failure" } // Outcome will be set by the server. diff --git a/exporter/elasticexporter/internal/translator/elastic/traces_test.go b/exporter/elasticexporter/internal/translator/elastic/traces_test.go index 7fa0a230965c..eebc426a6a01 100644 --- a/exporter/elasticexporter/internal/translator/elastic/traces_test.go +++ b/exporter/elasticexporter/internal/translator/elastic/traces_test.go @@ -24,7 +24,8 @@ import ( "go.elastic.co/apm/model" "go.elastic.co/apm/transport/transporttest" "go.elastic.co/fastjson" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/elasticexporter/internal/translator/elastic" ) @@ -32,7 +33,7 @@ import ( func TestEncodeSpan(t *testing.T) { var w fastjson.Writer var recorder transporttest.RecorderTransport - elastic.EncodeResourceMetadata(pdata.NewResource(), &w) + elastic.EncodeResourceMetadata(pcommon.NewResource(), &w) traceID := model.TraceID{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} rootTransactionID := model.SpanID{1, 1, 1, 1, 1, 1, 1, 1} @@ -42,40 +43,40 @@ func TestEncodeSpan(t *testing.T) { startTime := time.Unix(123, 0).UTC() endTime := startTime.Add(time.Millisecond * 5) - rootSpan := pdata.NewSpan() - rootSpan.SetSpanID(pdata.NewSpanID(rootTransactionID)) + rootSpan := ptrace.NewSpan() + rootSpan.SetSpanID(pcommon.NewSpanID(rootTransactionID)) rootSpan.SetName("root_span") rootSpan.Attributes().InsertString("string.attr", "string_value") rootSpan.Attributes().InsertInt("int.attr", 123) rootSpan.Attributes().InsertDouble("double.attr", 123.456) rootSpan.Attributes().InsertBool("bool.attr", true) - clientSpan := pdata.NewSpan() - clientSpan.SetSpanID(pdata.NewSpanID(clientSpanID)) - clientSpan.SetParentSpanID(pdata.NewSpanID(rootTransactionID)) - clientSpan.SetKind(pdata.SpanKindClient) + clientSpan := ptrace.NewSpan() + clientSpan.SetSpanID(pcommon.NewSpanID(clientSpanID)) + clientSpan.SetParentSpanID(pcommon.NewSpanID(rootTransactionID)) + clientSpan.SetKind(ptrace.SpanKindClient) clientSpan.SetName("client_span") - clientSpan.Status().SetCode(pdata.StatusCodeError) + clientSpan.Status().SetCode(ptrace.StatusCodeError) clientSpan.Attributes().InsertString("string.attr", "string_value") clientSpan.Attributes().InsertInt("int.attr", 123) 
clientSpan.Attributes().InsertDouble("double.attr", 123.456) clientSpan.Attributes().InsertBool("bool.attr", true) - serverSpan := pdata.NewSpan() - serverSpan.SetSpanID(pdata.NewSpanID(serverTransactionID)) - serverSpan.SetParentSpanID(pdata.NewSpanID(clientSpanID)) - serverSpan.SetKind(pdata.SpanKindServer) + serverSpan := ptrace.NewSpan() + serverSpan.SetSpanID(pcommon.NewSpanID(serverTransactionID)) + serverSpan.SetParentSpanID(pcommon.NewSpanID(clientSpanID)) + serverSpan.SetKind(ptrace.SpanKindServer) serverSpan.SetName("server_span") - serverSpan.Status().SetCode(pdata.StatusCodeOk) + serverSpan.Status().SetCode(ptrace.StatusCodeOk) - for _, span := range []pdata.Span{rootSpan, clientSpan, serverSpan} { - span.SetTraceID(pdata.NewTraceID(traceID)) - span.SetStartTimestamp(pdata.NewTimestampFromTime(startTime)) - span.SetEndTimestamp(pdata.NewTimestampFromTime(endTime)) + for _, span := range []ptrace.Span{rootSpan, clientSpan, serverSpan} { + span.SetTraceID(pcommon.NewTraceID(traceID)) + span.SetStartTimestamp(pcommon.NewTimestampFromTime(startTime)) + span.SetEndTimestamp(pcommon.NewTimestampFromTime(endTime)) } - for _, span := range []pdata.Span{rootSpan, clientSpan, serverSpan} { - err := elastic.EncodeSpan(span, pdata.NewInstrumentationScope(), pdata.NewResource(), &w) + for _, span := range []ptrace.Span{rootSpan, clientSpan, serverSpan} { + err := elastic.EncodeSpan(span, pcommon.NewInstrumentationScope(), pcommon.NewResource(), &w) require.NoError(t, err) } sendStream(t, &w, &recorder) @@ -145,23 +146,23 @@ func TestEncodeSpan(t *testing.T) { } func TestEncodeSpanStatus(t *testing.T) { - testStatusCode := func(t *testing.T, statusCode pdata.StatusCode, expectedResult, expectedOutcome string) { + testStatusCode := func(t *testing.T, statusCode ptrace.StatusCode, expectedResult, expectedOutcome string) { t.Helper() var w fastjson.Writer var recorder transporttest.RecorderTransport - elastic.EncodeResourceMetadata(pdata.NewResource(), &w) + elastic.EncodeResourceMetadata(pcommon.NewResource(), &w) - span := pdata.NewSpan() - span.SetTraceID(pdata.NewTraceID([16]byte{1})) - span.SetSpanID(pdata.NewSpanID([8]byte{1})) + span := ptrace.NewSpan() + span.SetTraceID(pcommon.NewTraceID([16]byte{1})) + span.SetSpanID(pcommon.NewSpanID([8]byte{1})) span.SetName("span") if statusCode >= 0 { span.Status().SetCode(statusCode) } - err := elastic.EncodeSpan(span, pdata.NewInstrumentationScope(), pdata.NewResource(), &w) + err := elastic.EncodeSpan(span, pcommon.NewInstrumentationScope(), pcommon.NewResource(), &w) require.NoError(t, err) sendStream(t, &w, &recorder) payloads := recorder.Payloads() @@ -171,19 +172,19 @@ func TestEncodeSpanStatus(t *testing.T) { } testStatusCode(t, -1, "", "") - testStatusCode(t, pdata.StatusCodeUnset, "", "") - testStatusCode(t, pdata.StatusCodeOk, "OK", "success") - testStatusCode(t, pdata.StatusCodeError, "Error", "failure") + testStatusCode(t, ptrace.StatusCodeUnset, "", "") + testStatusCode(t, ptrace.StatusCodeOk, "OK", "success") + testStatusCode(t, ptrace.StatusCodeError, "Error", "failure") } func TestEncodeSpanTruncation(t *testing.T) { - span := pdata.NewSpan() + span := ptrace.NewSpan() span.SetName(strings.Repeat("x", 1300)) var w fastjson.Writer var recorder transporttest.RecorderTransport - elastic.EncodeResourceMetadata(pdata.NewResource(), &w) - err := elastic.EncodeSpan(span, pdata.NewInstrumentationScope(), pdata.NewResource(), &w) + elastic.EncodeResourceMetadata(pcommon.NewResource(), &w) + err := elastic.EncodeSpan(span, 
pcommon.NewInstrumentationScope(), pcommon.NewResource(), &w) require.NoError(t, err) sendStream(t, &w, &recorder) @@ -489,14 +490,14 @@ func TestInstrumentationLibrary(t *testing.T) { var w fastjson.Writer var recorder transporttest.RecorderTransport - span := pdata.NewSpan() + span := ptrace.NewSpan() span.SetName("root_span") - library := pdata.NewInstrumentationScope() + library := pcommon.NewInstrumentationScope() library.SetName("library-name") library.SetVersion("1.2.3") - resource := pdata.NewResource() + resource := pcommon.NewResource() elastic.EncodeResourceMetadata(resource, &w) err := elastic.EncodeSpan(span, library, resource, &w) assert.NoError(t, err) @@ -518,12 +519,12 @@ func transactionWithAttributes(t *testing.T, attrs map[string]interface{}) model var w fastjson.Writer var recorder transporttest.RecorderTransport - span := pdata.NewSpan() - pdata.NewMapFromRaw(attrs).CopyTo(span.Attributes()) + span := ptrace.NewSpan() + pcommon.NewMapFromRaw(attrs).CopyTo(span.Attributes()) - resource := pdata.NewResource() + resource := pcommon.NewResource() elastic.EncodeResourceMetadata(resource, &w) - err := elastic.EncodeSpan(span, pdata.NewInstrumentationScope(), resource, &w) + err := elastic.EncodeSpan(span, pcommon.NewInstrumentationScope(), resource, &w) assert.NoError(t, err) sendStream(t, &w, &recorder) @@ -536,13 +537,13 @@ func spanWithAttributes(t *testing.T, attrs map[string]interface{}) model.Span { var w fastjson.Writer var recorder transporttest.RecorderTransport - span := pdata.NewSpan() - span.SetParentSpanID(pdata.NewSpanID([8]byte{1})) - pdata.NewMapFromRaw(attrs).CopyTo(span.Attributes()) + span := ptrace.NewSpan() + span.SetParentSpanID(pcommon.NewSpanID([8]byte{1})) + pcommon.NewMapFromRaw(attrs).CopyTo(span.Attributes()) - resource := pdata.NewResource() + resource := pcommon.NewResource() elastic.EncodeResourceMetadata(resource, &w) - err := elastic.EncodeSpan(span, pdata.NewInstrumentationScope(), resource, &w) + err := elastic.EncodeSpan(span, pcommon.NewInstrumentationScope(), resource, &w) assert.NoError(t, err) sendStream(t, &w, &recorder) diff --git a/exporter/elasticexporter/internal/translator/elastic/utils.go b/exporter/elasticexporter/internal/translator/elastic/utils.go index 4e141ed35e2b..c43a46474e26 100644 --- a/exporter/elasticexporter/internal/translator/elastic/utils.go +++ b/exporter/elasticexporter/internal/translator/elastic/utils.go @@ -20,7 +20,7 @@ import ( "regexp" "strings" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" ) var ( @@ -28,15 +28,15 @@ var ( labelKeyReplacer = strings.NewReplacer(`.`, `_`, `*`, `_`, `"`, `_`) ) -func ifaceAttributeValue(v pdata.Value) interface{} { +func ifaceAttributeValue(v pcommon.Value) interface{} { switch v.Type() { - case pdata.ValueTypeString: + case pcommon.ValueTypeString: return truncate(v.StringVal()) - case pdata.ValueTypeInt: + case pcommon.ValueTypeInt: return v.IntVal() - case pdata.ValueTypeDouble: + case pcommon.ValueTypeDouble: return v.DoubleVal() - case pdata.ValueTypeBool: + case pcommon.ValueTypeBool: return v.BoolVal() } return nil diff --git a/exporter/elasticsearchexporter/exporter.go b/exporter/elasticsearchexporter/exporter.go index a3e01bd2eb86..fd70632e234b 100644 --- a/exporter/elasticsearchexporter/exporter.go +++ b/exporter/elasticsearchexporter/exporter.go @@ -28,7 +28,8 @@ import ( "github.com/cenkalti/backoff/v4" elasticsearch7 "github.com/elastic/go-elasticsearch/v7" esutil7 
"github.com/elastic/go-elasticsearch/v7/esutil" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" "go.uber.org/multierr" "go.uber.org/zap" @@ -94,7 +95,7 @@ func (e *elasticsearchExporter) Shutdown(ctx context.Context) error { return e.bulkIndexer.Close(ctx) } -func (e *elasticsearchExporter) pushLogsData(ctx context.Context, ld pdata.Logs) error { +func (e *elasticsearchExporter) pushLogsData(ctx context.Context, ld plog.Logs) error { var errs []error rls := ld.ResourceLogs() @@ -119,7 +120,7 @@ func (e *elasticsearchExporter) pushLogsData(ctx context.Context, ld pdata.Logs) return multierr.Combine(errs...) } -func (e *elasticsearchExporter) pushLogRecord(ctx context.Context, resource pdata.Resource, record pdata.LogRecord) error { +func (e *elasticsearchExporter) pushLogRecord(ctx context.Context, resource pcommon.Resource, record plog.LogRecord) error { document, err := e.model.encodeLog(resource, record) if err != nil { return fmt.Errorf("Failed to encode log event: %w", err) diff --git a/exporter/elasticsearchexporter/go.mod b/exporter/elasticsearchexporter/go.mod index 7504c024a85e..d7c17e334518 100644 --- a/exporter/elasticsearchexporter/go.mod +++ b/exporter/elasticsearchexporter/go.mod @@ -8,8 +8,8 @@ require ( github.com/elastic/go-structform v0.0.9 github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.48.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/multierr v1.8.0 go.uber.org/zap v1.21.0 @@ -20,8 +20,7 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/fsnotify/fsnotify v1.5.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/protobuf v1.5.2 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/pretty v0.3.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect @@ -29,20 +28,15 @@ require ( github.com/pelletier/go-toml v1.9.4 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rogpeppe/go-internal v1.8.1 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect - golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect - golang.org/x/text v0.3.7 // indirect - google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect - google.golang.org/grpc v1.45.0 // indirect - google.golang.org/protobuf v1.28.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/common => ../../internal/common + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/exporter/elasticsearchexporter/go.sum 
b/exporter/elasticsearchexporter/go.sum index fbe644c00b11..8190621adcc8 100644 --- a/exporter/elasticsearchexporter/go.sum +++ b/exporter/elasticsearchexporter/go.sum @@ -1,8 +1,5 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= @@ -22,16 +19,8 @@ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kB github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -43,9 +32,6 @@ github.com/elastic/go-structform v0.0.9/go.mod h1:CZWf9aIRYY5SuKSmOhtXScE5uQiLZN github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod 
h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= @@ -53,7 +39,6 @@ github.com/felixge/httpsnoop v1.0.2 h1:+nS9g82KMXccJ/wp0zyRW9ZBHFETmMGtkk+2CTTrW github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= @@ -70,18 +55,14 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -91,13 +72,10 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -127,8 +105,8 @@ github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqx github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= @@ -169,22 +147,17 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rhnvrm/simples3 v0.6.1/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg= github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 
h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= @@ -194,20 +167,19 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -231,20 +203,16 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net 
v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -260,22 +228,18 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys 
v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -296,22 +260,16 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -321,19 +279,13 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf 
v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= diff --git a/exporter/elasticsearchexporter/internal/objmodel/objmodel.go b/exporter/elasticsearchexporter/internal/objmodel/objmodel.go index 1aabbef4eba0..d0ae365ed838 100644 --- a/exporter/elasticsearchexporter/internal/objmodel/objmodel.go +++ b/exporter/elasticsearchexporter/internal/objmodel/objmodel.go @@ -51,7 +51,7 @@ import ( "github.com/elastic/go-structform" "github.com/elastic/go-structform/json" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" ) // Document is an intermediate representation for converting open telemetry records with arbitrary attributes @@ -104,7 +104,7 @@ type idValue interface { // DocumentFromAttributes creates a document from a OpenTelemetry attribute // map. All nested maps will be flattened, with keys being joined using a `.` symbol. -func DocumentFromAttributes(am pdata.Map) Document { +func DocumentFromAttributes(am pcommon.Map) Document { return DocumentFromAttributesWithPath("", am) } @@ -112,7 +112,7 @@ func DocumentFromAttributes(am pdata.Map) Document { // map. All nested maps will be flattened, with keys being joined using a `.` symbol. // // All keys in the map will be prefixed with path. -func DocumentFromAttributesWithPath(path string, am pdata.Map) Document { +func DocumentFromAttributesWithPath(path string, am pcommon.Map) Document { if am.Len() == 0 { return Document{} } @@ -123,7 +123,7 @@ func DocumentFromAttributesWithPath(path string, am pdata.Map) Document { } // AddTimestamp adds a raw timestamp value to the Document. -func (doc *Document) AddTimestamp(key string, ts pdata.Timestamp) { +func (doc *Document) AddTimestamp(key string, ts pcommon.Timestamp) { doc.Add(key, TimestampValue(ts.AsTime())) } @@ -154,17 +154,17 @@ func (doc *Document) AddInt(key string, value int64) { // AddAttributes expands and flattens all key-value pairs from the input attribute map into // the document. 
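// A minimal sketch, not part of the patch itself, of how the objmodel helpers are fed
// with the new pcommon types after the model/pdata split. DocumentFromAttributes,
// AddTimestamp and pcommon.NewMapFromRaw are the APIs that appear elsewhere in this
// diff; pcommon.NewTimestampFromTime and the exampleDocument wrapper are assumptions
// added only for illustration. Nested maps are flattened into dot-joined keys.

package elasticsearchexporter

import (
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"

	"github.com/open-telemetry/opentelemetry-collector-contrib/exporter/elasticsearchexporter/internal/objmodel"
)

func exampleDocument() objmodel.Document {
	attrs := pcommon.NewMapFromRaw(map[string]interface{}{
		"str":    "test",
		"nested": map[string]interface{}{"i": 42},
	})
	// Flattening produces the fields "nested.i" and "str".
	doc := objmodel.DocumentFromAttributes(attrs)
	doc.AddTimestamp("@timestamp", pcommon.NewTimestampFromTime(time.Now()))
	return doc
}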
-func (doc *Document) AddAttributes(key string, attributes pdata.Map) { +func (doc *Document) AddAttributes(key string, attributes pcommon.Map) { doc.fields = appendAttributeFields(doc.fields, key, attributes) } // AddAttribute converts and adds a AttributeValue to the document. If the attribute represents a map, // the fields will be flattened. -func (doc *Document) AddAttribute(key string, attribute pdata.Value) { +func (doc *Document) AddAttribute(key string, attribute pcommon.Value) { switch attribute.Type() { - case pdata.ValueTypeEmpty: + case pcommon.ValueTypeEmpty: // do not add 'null' - case pdata.ValueTypeMap: + case pcommon.ValueTypeMap: doc.AddAttributes(key, attribute.MapVal()) default: doc.Add(key, ValueFromAttribute(attribute)) @@ -367,20 +367,20 @@ func TimestampValue(ts time.Time) Value { } // ValueFromAttribute converts a AttributeValue into a value. -func ValueFromAttribute(attr pdata.Value) Value { +func ValueFromAttribute(attr pcommon.Value) Value { switch attr.Type() { - case pdata.ValueTypeInt: + case pcommon.ValueTypeInt: return IntValue(attr.IntVal()) - case pdata.ValueTypeDouble: + case pcommon.ValueTypeDouble: return DoubleValue(attr.DoubleVal()) - case pdata.ValueTypeString: + case pcommon.ValueTypeString: return StringValue(attr.StringVal()) - case pdata.ValueTypeBool: + case pcommon.ValueTypeBool: return BoolValue(attr.BoolVal()) - case pdata.ValueTypeSlice: + case pcommon.ValueTypeSlice: sub := arrFromAttributes(attr.SliceVal()) return ArrValue(sub...) - case pdata.ValueTypeMap: + case pcommon.ValueTypeMap: sub := DocumentFromAttributes(attr.MapVal()) return Value{kind: KindObject, doc: sub} default: @@ -464,7 +464,7 @@ func (v *Value) iterJSON(w *json.Visitor, dedot bool) error { return nil } -func arrFromAttributes(aa pdata.Slice) []Value { +func arrFromAttributes(aa pcommon.Slice) []Value { if aa.Len() == 0 { return nil } @@ -476,20 +476,20 @@ func arrFromAttributes(aa pdata.Slice) []Value { return values } -func appendAttributeFields(fields []field, path string, am pdata.Map) []field { - am.Range(func(k string, val pdata.Value) bool { +func appendAttributeFields(fields []field, path string, am pcommon.Map) []field { + am.Range(func(k string, val pcommon.Value) bool { fields = appendAttributeValue(fields, path, k, val) return true }) return fields } -func appendAttributeValue(fields []field, path string, key string, attr pdata.Value) []field { - if attr.Type() == pdata.ValueTypeEmpty { +func appendAttributeValue(fields []field, path string, key string, attr pcommon.Value) []field { + if attr.Type() == pcommon.ValueTypeEmpty { return fields } - if attr.Type() == pdata.ValueTypeMap { + if attr.Type() == pcommon.ValueTypeMap { return appendAttributeFields(fields, flattenKey(path, key), attr.MapVal()) } diff --git a/exporter/elasticsearchexporter/internal/objmodel/objmodel_test.go b/exporter/elasticsearchexporter/internal/objmodel/objmodel_test.go index 7f69a20f1fb2..a8e8d4f93788 100644 --- a/exporter/elasticsearchexporter/internal/objmodel/objmodel_test.go +++ b/exporter/elasticsearchexporter/internal/objmodel/objmodel_test.go @@ -23,7 +23,7 @@ import ( "github.com/elastic/go-structform/json" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" ) var dijkstra = time.Date(1930, 5, 11, 16, 33, 11, 123456789, time.UTC) @@ -35,12 +35,12 @@ func TestObjectModel_CreateMap(t *testing.T) { }{ "from empty map": { build: func() 
Document { - return DocumentFromAttributes(pdata.NewMap()) + return DocumentFromAttributes(pcommon.NewMap()) }, }, "from map": { build: func() Document { - return DocumentFromAttributes(pdata.NewMapFromRaw(map[string]interface{}{ + return DocumentFromAttributes(pcommon.NewMapFromRaw(map[string]interface{}{ "i": 42, "str": "test", })) @@ -49,7 +49,7 @@ func TestObjectModel_CreateMap(t *testing.T) { }, "ignores nil values": { build: func() Document { - return DocumentFromAttributes(pdata.NewMapFromRaw(map[string]interface{}{ + return DocumentFromAttributes(pcommon.NewMapFromRaw(map[string]interface{}{ "null": nil, "str": "test", })) @@ -58,7 +58,7 @@ func TestObjectModel_CreateMap(t *testing.T) { }, "from map with prefix": { build: func() Document { - return DocumentFromAttributesWithPath("prefix", pdata.NewMapFromRaw(map[string]interface{}{ + return DocumentFromAttributesWithPath("prefix", pcommon.NewMapFromRaw(map[string]interface{}{ "i": 42, "str": "test", })) @@ -67,7 +67,7 @@ func TestObjectModel_CreateMap(t *testing.T) { }, "add attributes with key": { build: func() (doc Document) { - doc.AddAttributes("prefix", pdata.NewMapFromRaw(map[string]interface{}{ + doc.AddAttributes("prefix", pcommon.NewMapFromRaw(map[string]interface{}{ "i": 42, "str": "test", })) @@ -77,7 +77,7 @@ func TestObjectModel_CreateMap(t *testing.T) { }, "add attribute flattens a map value": { build: func() (doc Document) { - mapVal := pdata.NewValueMap() + mapVal := pcommon.NewValueMap() m := mapVal.MapVal() m.InsertInt("i", 42) m.InsertString("str", "test") @@ -155,10 +155,10 @@ func TestObjectModel_Dedup(t *testing.T) { }, "duplicate after flattening from map: namespace object at end": { build: func() Document { - namespace := pdata.NewValueMap() + namespace := pcommon.NewValueMap() namespace.MapVal().InsertInt("a", 23) - am := pdata.NewMap() + am := pcommon.NewMap() am.InsertInt("namespace.a", 42) am.InsertString("toplevel", "test") am.Insert("namespace", namespace) @@ -168,10 +168,10 @@ func TestObjectModel_Dedup(t *testing.T) { }, "duplicate after flattening from map: namespace object at beginning": { build: func() Document { - namespace := pdata.NewValueMap() + namespace := pcommon.NewValueMap() namespace.MapVal().InsertInt("a", 23) - am := pdata.NewMap() + am := pcommon.NewMap() am.Insert("namespace", namespace) am.InsertInt("namespace.a", 42) am.InsertString("toplevel", "test") @@ -226,51 +226,51 @@ func TestObjectModel_Dedup(t *testing.T) { func TestValue_FromAttribute(t *testing.T) { tests := map[string]struct { - in pdata.Value + in pcommon.Value want Value }{ "null": { - in: pdata.NewValueEmpty(), + in: pcommon.NewValueEmpty(), want: nilValue, }, "string": { - in: pdata.NewValueString("test"), + in: pcommon.NewValueString("test"), want: StringValue("test"), }, "int": { - in: pdata.NewValueInt(23), + in: pcommon.NewValueInt(23), want: IntValue(23), }, "double": { - in: pdata.NewValueDouble(3.14), + in: pcommon.NewValueDouble(3.14), want: DoubleValue(3.14), }, "bool": { - in: pdata.NewValueBool(true), + in: pcommon.NewValueBool(true), want: BoolValue(true), }, "empty array": { - in: pdata.NewValueSlice(), + in: pcommon.NewValueSlice(), want: Value{kind: KindArr}, }, "non-empty array": { - in: func() pdata.Value { - v := pdata.NewValueSlice() + in: func() pcommon.Value { + v := pcommon.NewValueSlice() tgt := v.SliceVal().AppendEmpty() - pdata.NewValueInt(1).CopyTo(tgt) + pcommon.NewValueInt(1).CopyTo(tgt) return v }(), want: ArrValue(IntValue(1)), }, "empty map": { - in: pdata.NewValueMap(), + in: 
pcommon.NewValueMap(), want: Value{kind: KindObject}, }, "non-empty map": { - in: func() pdata.Value { - v := pdata.NewValueMap() + in: func() pcommon.Value { + v := pcommon.NewValueMap() m := v.MapVal() - m.Insert("a", pdata.NewValueInt(1)) + m.Insert("a", pcommon.NewValueInt(1)) return v }(), want: Value{kind: KindObject, doc: Document{[]field{{"a", IntValue(1)}}}}, @@ -291,28 +291,28 @@ func TestDocument_Serialize_Flat(t *testing.T) { want string }{ "no nesting with multiple fields": { - doc: DocumentFromAttributes(pdata.NewMapFromRaw(map[string]interface{}{ + doc: DocumentFromAttributes(pcommon.NewMapFromRaw(map[string]interface{}{ "a": "test", "b": 1, })), want: `{"a":"test","b":1}`, }, "shared prefix": { - doc: DocumentFromAttributes(pdata.NewMapFromRaw(map[string]interface{}{ + doc: DocumentFromAttributes(pcommon.NewMapFromRaw(map[string]interface{}{ "a.str": "test", "a.i": 1, })), want: `{"a.i":1,"a.str":"test"}`, }, "multiple namespaces with dot": { - doc: DocumentFromAttributes(pdata.NewMapFromRaw(map[string]interface{}{ + doc: DocumentFromAttributes(pcommon.NewMapFromRaw(map[string]interface{}{ "a.str": "test", "b.i": 1, })), want: `{"a.str":"test","b.i":1}`, }, "nested maps": { - doc: DocumentFromAttributes(pdata.NewMapFromRaw(map[string]interface{}{ + doc: DocumentFromAttributes(pcommon.NewMapFromRaw(map[string]interface{}{ "a": map[string]interface{}{ "str": "test", "i": 1, @@ -321,7 +321,7 @@ func TestDocument_Serialize_Flat(t *testing.T) { want: `{"a.i":1,"a.str":"test"}`, }, "multi-level nested namespace maps": { - doc: DocumentFromAttributes(pdata.NewMapFromRaw(map[string]interface{}{ + doc: DocumentFromAttributes(pcommon.NewMapFromRaw(map[string]interface{}{ "a": map[string]interface{}{ "b.str": "test", "i": 1, @@ -349,28 +349,28 @@ func TestDocument_Serialize_Dedot(t *testing.T) { want string }{ "no nesting with multiple fields": { - doc: DocumentFromAttributes(pdata.NewMapFromRaw(map[string]interface{}{ + doc: DocumentFromAttributes(pcommon.NewMapFromRaw(map[string]interface{}{ "a": "test", "b": 1, })), want: `{"a":"test","b":1}`, }, "shared prefix": { - doc: DocumentFromAttributes(pdata.NewMapFromRaw(map[string]interface{}{ + doc: DocumentFromAttributes(pcommon.NewMapFromRaw(map[string]interface{}{ "a.str": "test", "a.i": 1, })), want: `{"a":{"i":1,"str":"test"}}`, }, "multiple namespaces": { - doc: DocumentFromAttributes(pdata.NewMapFromRaw(map[string]interface{}{ + doc: DocumentFromAttributes(pcommon.NewMapFromRaw(map[string]interface{}{ "a.str": "test", "b.i": 1, })), want: `{"a":{"str":"test"},"b":{"i":1}}`, }, "nested maps": { - doc: DocumentFromAttributes(pdata.NewMapFromRaw(map[string]interface{}{ + doc: DocumentFromAttributes(pcommon.NewMapFromRaw(map[string]interface{}{ "a": map[string]interface{}{ "str": "test", "i": 1, @@ -379,7 +379,7 @@ func TestDocument_Serialize_Dedot(t *testing.T) { want: `{"a":{"i":1,"str":"test"}}`, }, "multi-level nested namespace maps": { - doc: DocumentFromAttributes(pdata.NewMapFromRaw(map[string]interface{}{ + doc: DocumentFromAttributes(pcommon.NewMapFromRaw(map[string]interface{}{ "a": map[string]interface{}{ "b.c.str": "test", "i": 1, diff --git a/exporter/elasticsearchexporter/model.go b/exporter/elasticsearchexporter/model.go index 849755884188..64c0f1dbf41c 100644 --- a/exporter/elasticsearchexporter/model.go +++ b/exporter/elasticsearchexporter/model.go @@ -17,13 +17,14 @@ package elasticsearchexporter // import "github.com/open-telemetry/opentelemetry import ( "bytes" - "go.opentelemetry.io/collector/model/pdata" 
+ "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/elasticsearchexporter/internal/objmodel" ) type mappingModel interface { - encodeLog(pdata.Resource, pdata.LogRecord) ([]byte, error) + encodeLog(pcommon.Resource, plog.LogRecord) ([]byte, error) } // encodeModel tries to keep the event as close to the original open telemetry semantics as is. @@ -37,7 +38,7 @@ type encodeModel struct { dedot bool } -func (m *encodeModel) encodeLog(resource pdata.Resource, record pdata.LogRecord) ([]byte, error) { +func (m *encodeModel) encodeLog(resource pcommon.Resource, record plog.LogRecord) ([]byte, error) { var document objmodel.Document document.AddTimestamp("@timestamp", record.Timestamp()) // We use @timestamp in order to ensure that we can index if the default data stream logs template is used. document.AddID("TraceId", record.TraceID()) diff --git a/exporter/f5cloudexporter/go.mod b/exporter/f5cloudexporter/go.mod index 4464bdb37871..37cbc2ed3639 100644 --- a/exporter/f5cloudexporter/go.mod +++ b/exporter/f5cloudexporter/go.mod @@ -5,14 +5,14 @@ go 1.17 require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.48.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a google.golang.org/api v0.74.0 ) require ( cloud.google.com/go/compute v1.5.0 // indirect - github.com/cenkalti/backoff/v4 v4.1.2 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/felixge/httpsnoop v1.0.2 // indirect github.com/fsnotify/fsnotify v1.5.1 // indirect @@ -23,7 +23,7 @@ require ( github.com/golang/protobuf v1.5.2 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/klauspost/compress v1.15.1 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/pretty v0.3.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect @@ -32,9 +32,8 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rogpeppe/go-internal v1.8.1 // indirect github.com/rs/cors v1.8.2 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opencensus.io v0.23.0 // indirect - go.opentelemetry.io/collector/model v0.48.0 // indirect + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect @@ -54,3 +53,5 @@ require ( ) replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/common => ../../internal/common + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/exporter/f5cloudexporter/go.sum b/exporter/f5cloudexporter/go.sum index 599d753a9216..4e285879decd 100644 --- a/exporter/f5cloudexporter/go.sum +++ b/exporter/f5cloudexporter/go.sum @@ -68,8 +68,8 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod 
h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= -github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -196,7 +196,6 @@ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5m github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -232,8 +231,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= @@ -284,8 +283,6 @@ github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod 
h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -310,10 +307,10 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0/go.mod h1:PFmBsWbldL1kiWZk9+0LBZz2brhByaGsvp6pRICMlPE= @@ -324,7 +321,7 @@ go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1 go.opentelemetry.io/otel/metric v0.28.0/go.mod h1:TrzsfQAmQaB1PDcdhBauLMk7nyyg9hm+GoQq/ekE9Iw= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.0/go.mod h1:qs7BrU5cZ8dXQHBGxHMOxwME/27YH2qEp4/+tZLLwJE= go.opentelemetry.io/otel/trace v1.6.1/go.mod h1:RkFRM1m0puWIq10oxImnGEduNBzxiN7TXluRBtE+5j0= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= diff --git a/exporter/fileexporter/file_exporter.go b/exporter/fileexporter/file_exporter.go index b6b7f9e4ed70..372037d3c442 100644 --- a/exporter/fileexporter/file_exporter.go +++ b/exporter/fileexporter/file_exporter.go @@ -22,14 +22,15 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/model/otlp" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" ) // Marshaler configuration used for marhsaling Protobuf to JSON. 
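// A minimal sketch, not part of the patch itself, of the per-signal JSON codecs that
// replace the old otlp.NewJSON*Marshaler constructors. MarshalTraces and
// UnmarshalTraces are the calls used below and in file_exporter_test.go;
// ptrace.NewTraces and the roundTripTraces wrapper are assumptions added only for
// illustration.

package fileexporter

import "go.opentelemetry.io/collector/pdata/ptrace"

func roundTripTraces() (ptrace.Traces, error) {
	td := ptrace.NewTraces()
	buf, err := ptrace.NewJSONMarshaler().MarshalTraces(td)
	if err != nil {
		return ptrace.Traces{}, err
	}
	return ptrace.NewJSONUnmarshaler().UnmarshalTraces(buf)
}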
-var tracesMarshaler = otlp.NewJSONTracesMarshaler() -var metricsMarshaler = otlp.NewJSONMetricsMarshaler() -var logsMarshaler = otlp.NewJSONLogsMarshaler() +var tracesMarshaler = ptrace.NewJSONMarshaler() +var metricsMarshaler = pmetric.NewJSONMarshaler() +var logsMarshaler = plog.NewJSONMarshaler() // fileExporter is the implementation of file exporter that writes telemetry data to a file // in Protobuf-JSON format. @@ -43,7 +44,7 @@ func (e *fileExporter) Capabilities() consumer.Capabilities { return consumer.Capabilities{MutatesData: false} } -func (e *fileExporter) ConsumeTraces(_ context.Context, td pdata.Traces) error { +func (e *fileExporter) ConsumeTraces(_ context.Context, td ptrace.Traces) error { buf, err := tracesMarshaler.MarshalTraces(td) if err != nil { return err @@ -51,7 +52,7 @@ func (e *fileExporter) ConsumeTraces(_ context.Context, td pdata.Traces) error { return exportMessageAsLine(e, buf) } -func (e *fileExporter) ConsumeMetrics(_ context.Context, md pdata.Metrics) error { +func (e *fileExporter) ConsumeMetrics(_ context.Context, md pmetric.Metrics) error { buf, err := metricsMarshaler.MarshalMetrics(md) if err != nil { return err @@ -59,7 +60,7 @@ func (e *fileExporter) ConsumeMetrics(_ context.Context, md pdata.Metrics) error return exportMessageAsLine(e, buf) } -func (e *fileExporter) ConsumeLogs(_ context.Context, ld pdata.Logs) error { +func (e *fileExporter) ConsumeLogs(_ context.Context, ld plog.Logs) error { buf, err := logsMarshaler.MarshalLogs(ld) if err != nil { return err diff --git a/exporter/fileexporter/file_exporter_test.go b/exporter/fileexporter/file_exporter_test.go index 6b88ba7ea92c..9f067ecaf71c 100644 --- a/exporter/fileexporter/file_exporter_test.go +++ b/exporter/fileexporter/file_exporter_test.go @@ -23,7 +23,9 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component/componenttest" - "go.opentelemetry.io/collector/model/otlp" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/testdata" ) @@ -37,7 +39,7 @@ func TestFileTracesExporter(t *testing.T) { assert.NoError(t, fe.ConsumeTraces(context.Background(), td)) assert.NoError(t, fe.Shutdown(context.Background())) - unmarshaler := otlp.NewJSONTracesUnmarshaler() + unmarshaler := ptrace.NewJSONUnmarshaler() buf, err := ioutil.ReadFile(fe.path) assert.NoError(t, err) got, err := unmarshaler.UnmarshalTraces(buf) @@ -65,7 +67,7 @@ func TestFileMetricsExporter(t *testing.T) { assert.NoError(t, fe.ConsumeMetrics(context.Background(), md)) assert.NoError(t, fe.Shutdown(context.Background())) - unmarshaler := otlp.NewJSONMetricsUnmarshaler() + unmarshaler := pmetric.NewJSONUnmarshaler() buf, err := ioutil.ReadFile(fe.path) assert.NoError(t, err) got, err := unmarshaler.UnmarshalMetrics(buf) @@ -93,7 +95,7 @@ func TestFileLogsExporter(t *testing.T) { assert.NoError(t, fe.ConsumeLogs(context.Background(), ld)) assert.NoError(t, fe.Shutdown(context.Background())) - unmarshaler := otlp.NewJSONLogsUnmarshaler() + unmarshaler := plog.NewJSONUnmarshaler() buf, err := ioutil.ReadFile(fe.path) assert.NoError(t, err) got, err := unmarshaler.UnmarshalLogs(buf) diff --git a/exporter/fileexporter/go.mod b/exporter/fileexporter/go.mod index 98ef10da7f9d..ebaa1c34a86f 100644 --- a/exporter/fileexporter/go.mod +++ b/exporter/fileexporter/go.mod @@ -6,22 
+6,20 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.48.0 github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.48.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 ) require ( - github.com/cenkalti/backoff/v4 v4.1.2 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/protobuf v1.5.2 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rogpeppe/go-internal v1.8.0 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect @@ -29,12 +27,6 @@ require ( go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect go.uber.org/zap v1.21.0 // indirect - golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect - golang.org/x/text v0.3.7 // indirect - google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect - google.golang.org/grpc v1.45.0 // indirect - google.golang.org/protobuf v1.28.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) @@ -42,3 +34,5 @@ require ( replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal => ../../internal/coreinternal replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent => ../../internal/sharedcomponent + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/exporter/fileexporter/go.sum b/exporter/fileexporter/go.sum index 11e5fe20c05c..489719c90391 100644 --- a/exporter/fileexporter/go.sum +++ b/exporter/fileexporter/go.sum @@ -1,8 +1,5 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= @@ -18,35 +15,23 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod 
h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= -github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/felixge/httpsnoop v1.0.2 h1:+nS9g82KMXccJ/wp0zyRW9ZBHFETmMGtkk+2CTTrW4o= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= 
-github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= @@ -63,18 +48,14 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -84,13 +65,10 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod 
h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -120,8 +98,8 @@ github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqx github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -159,21 +137,16 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rhnvrm/simples3 v0.6.1/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8= github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= @@ -183,20 +156,19 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod 
h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -220,20 +192,16 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod 
h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -249,21 +217,17 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -284,22 +248,16 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -309,19 +267,13 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/errgo.v2 v2.1.0/go.mod 
h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= diff --git a/exporter/googlecloudexporter/go.mod b/exporter/googlecloudexporter/go.mod index fc6a7a7ef611..585407264e4a 100644 --- a/exporter/googlecloudexporter/go.mod +++ b/exporter/googlecloudexporter/go.mod @@ -10,8 +10,9 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus v0.48.0 github.com/stretchr/testify v1.7.1 go.opencensus.io v0.23.0 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 google.golang.org/api v0.74.0 google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb google.golang.org/grpc v1.45.0 @@ -24,7 +25,7 @@ require ( cloud.google.com/go/trace v1.0.0 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/trace v1.0.0 // indirect github.com/aws/aws-sdk-go v1.43.32 // indirect - github.com/cenkalti/backoff/v4 v4.1.2 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/go-logr/logr v1.2.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect @@ -35,15 +36,14 @@ require ( github.com/googleapis/gax-go/v2 v2.2.0 // indirect github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect - go.opentelemetry.io/otel/sdk v1.6.1 // indirect + go.opentelemetry.io/otel/sdk v1.6.3 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect @@ -61,3 +61,5 @@ require ( replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus => ../../pkg/translator/opencensus replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal => ../../internal/coreinternal + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/exporter/googlecloudexporter/go.sum b/exporter/googlecloudexporter/go.sum index 87dc54416439..09716b72def3 100644 --- a/exporter/googlecloudexporter/go.sum +++ b/exporter/googlecloudexporter/go.sum @@ -57,7 +57,7 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9 cloud.google.com/go/trace v1.0.0 
h1:laKx2y7IWMjguCe5zZx6n7qLtREk4kyE69SXVC0VSN8= cloud.google.com/go/trace v1.0.0/go.mod h1:4iErSByzxkyHWzzlAj63/Gmjz0NH1ASqhJguHpGcr6A= code.cloudfoundry.org/bytefmt v0.0.0-20190710193110-1eb035ffe2b6/go.mod h1:wN/zk7mhREp/oviagqUXY3EwuHhWyOvAdsn5Y4CzOrc= -contrib.go.opencensus.io/exporter/prometheus v0.4.0/go.mod h1:o7cosnyfuPVK0tB8q0QmaQNhGnptITnPQB+z1+qeFB0= +contrib.go.opencensus.io/exporter/prometheus v0.4.1/go.mod h1:t9wvfitlUjGXG2IXAZsuFq26mDGid/JwCEXp+gTG/9U= contrib.go.opencensus.io/exporter/stackdriver v0.13.11 h1:YzmWJ2OT2K3ouXyMm5FmFQPoDs5TfLjx6Xn5x5CLN0I= contrib.go.opencensus.io/exporter/stackdriver v0.13.11/go.mod h1:I5htMbyta491eUxufwwZPQdcKvvgzMB4O9ni41YnIM8= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= @@ -98,8 +98,8 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= -github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.3.0 h1:t/LhUZLVitR1Ow2YOnduCsavhwFUklBMoGVYUCqmCqk= github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -152,10 +152,12 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -251,8 +253,6 @@ github.com/googleapis/gax-go/v2 v2.2.0 h1:s7jOdKSaksJVOxE0Y/S32otcfiP+UQ0cL8/GTK github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= github.com/googleinterns/cloud-operations-api-mock v0.0.0-20200709193332-a1e58c29bdd3 
h1:eHv/jVY/JNop1xg2J9cBb4EzyMpWZoNCP1BslSAIkOI= github.com/googleinterns/cloud-operations-api-mock v0.0.0-20200709193332-a1e58c29bdd3/go.mod h1:h/KNeRx7oYU4SpA4SoY7W2/NxDKEEVuwA6j9A27L4OI= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= -github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -301,8 +301,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/klauspost/compress v1.14.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= @@ -373,6 +373,7 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -383,6 +384,7 @@ github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB8 github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.28.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.33.0/go.mod h1:gB3sOl7P0TvJabZpLY5uQMpUqRCPPCyRLCZYc7JZTNE= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.1.3/go.mod 
h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= @@ -403,12 +405,11 @@ github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFo github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/sanity-io/litter v1.2.0/go.mod h1:JF6pZUFgu2Q0sBZ+HSV35P8TVPI1TTzEwyu9FXAw2W4= github.com/schollz/progressbar/v2 v2.13.2/go.mod h1:6YZjqdthH6SCZKv2rqGryrxPtfmRB/DWZxSMfCXPyD8= -github.com/shirou/gopsutil/v3 v3.22.2/go.mod h1:WapW1AOOPlHyXr+yOyw3uYx36enocrtSoSBy0L5vUHY= +github.com/shirou/gopsutil/v3 v3.22.3/go.mod h1:D01hZJ4pVHPpCTZ3m3T2+wDF2YAGfd+H4ifUguaQzHM= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= @@ -424,8 +425,8 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/tklauser/go-sysconf v0.3.9/go.mod h1:11DU/5sG7UexIrp/O6g35hrWzu0JxlwQ3LSFUzyeuhs= -github.com/tklauser/numcpus v0.3.0/go.mod h1:yFGUr7TUHQRAhyqBcEg0Ge34zDBAsIvJJcyE6boqnA8= +github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk= +github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -440,11 +441,14 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= go.opentelemetry.io/collector/model v0.46.0/go.mod h1:uyiyyq8lV45zrJ94MnLip26sorfNLP6J9XmOvaEmy7w= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= go.opentelemetry.io/collector/model v0.48.0/go.mod 
h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0/go.mod h1:SY9qHHUES6W3oZnO1H2W8NvsSovIoXRg/A1AH9px8+I= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.24.0/go.mod h1:7W3JSDYTtH3qKKHrS1fMiwLtK7iZFLPq1+7htfspX/E= @@ -461,7 +465,7 @@ go.opentelemetry.io/otel v1.6.0/go.mod h1:bfJD2DZVw0LBxghOTlgnlI0CV3hLDu9XF/QKOU go.opentelemetry.io/otel v1.6.1/go.mod h1:blzUabWHkX6LJewxvadmzafgh/wnvBSDBdOuwkAtrWQ= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= -go.opentelemetry.io/otel/exporters/prometheus v0.28.0/go.mod h1:nN2uGmk/rLmcbPTaZakIMqYH2Q0T8V1sOnKOHe/HLH0= +go.opentelemetry.io/otel/exporters/prometheus v0.29.0/go.mod h1:Er2VVJQZbHysogooLNchdZ3MLYoI7+d15mHmrRlRJCU= go.opentelemetry.io/otel/internal/metric v0.23.0/go.mod h1:z+RPiDJe30YnCrOhFGivwBS+DU1JU/PiLKkk4re2DNY= go.opentelemetry.io/otel/internal/metric v0.27.0/go.mod h1:n1CVxRqKqYZtqyTh9U/onvKapPGv7y/rpyOTI+LFNzw= go.opentelemetry.io/otel/metric v0.23.0/go.mod h1:G/Nn9InyNnIv7J6YVkQfpc0JCfKBNJaERBGw08nqmVQ= @@ -471,10 +475,10 @@ go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujX go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= go.opentelemetry.io/otel/sdk v1.0.1/go.mod h1:HrdXne+BiwsOHYYkBE5ysIcv2bvdZstxzmCQhxTcZkI= go.opentelemetry.io/otel/sdk v1.4.1/go.mod h1:NBwHDgDIBYjwK2WNu1OPgsIc2IJzmBXNnvIJxJc8BpE= -go.opentelemetry.io/otel/sdk v1.6.0/go.mod h1:PjLRUfDsoPy0zl7yrDGSUqjj43tL7rEtFdCEiGlxXRM= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= go.opentelemetry.io/otel/sdk v1.6.1/go.mod h1:IVYrddmFZ+eJqu2k38qD3WezFR2pymCzm8tdxyh3R4E= -go.opentelemetry.io/otel/sdk/metric v0.28.0/go.mod h1:DqJmT0ovBgoW6TJ8CAQyTnwxZPIp3KWtCiDDZ1uHAzU= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= +go.opentelemetry.io/otel/sdk v1.6.3/go.mod h1:A4iWF7HTXa+GWL/AaqESz28VuSBIcZ+0CV+IzJ5NMiQ= +go.opentelemetry.io/otel/sdk/metric v0.29.0/go.mod h1:IFkFNKI8Gq8zBdqOKdODCL9+LInBZLXaGpqSIKphNuU= go.opentelemetry.io/otel/trace v1.0.0-RC3/go.mod h1:VUt2TUYd8S2/ZRX09ZDFZQwn2RqfMB5MzO17jBojGxo= go.opentelemetry.io/otel/trace v1.0.0/go.mod h1:PXTWqayeFUlJV1YDNhsJYB184+IvAH814St6o6ajzIs= go.opentelemetry.io/otel/trace v1.0.1/go.mod h1:5g4i4fKLaX2BQpSBsxw8YYcgKpMMSW3x7ZTuYBr3sUk= @@ -675,7 +679,6 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
-golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= diff --git a/exporter/googlecloudexporter/legacymetrics.go b/exporter/googlecloudexporter/legacymetrics.go index a1f6e3557a76..b2cca7bf853a 100644 --- a/exporter/googlecloudexporter/legacymetrics.go +++ b/exporter/googlecloudexporter/legacymetrics.go @@ -29,8 +29,8 @@ import ( "go.opencensus.io/stats/view" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/exporter/exporterhelper" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pmetric" "google.golang.org/api/option" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" @@ -148,7 +148,7 @@ func newLegacyGoogleCloudMetricsExporter(cfg *LegacyConfig, set component.Export } // pushMetrics calls StackdriverExporter.PushMetricsProto on each element of the given metrics -func (me *metricsExporter) pushMetrics(ctx context.Context, m pdata.Metrics) error { +func (me *metricsExporter) pushMetrics(ctx context.Context, m pmetric.Metrics) error { rms := m.ResourceMetrics() mds := make([]*agentmetricspb.ExportMetricsServiceRequest, 0, rms.Len()) for i := 0; i < rms.Len(); i++ { diff --git a/exporter/googlecloudpubsubexporter/exporter.go b/exporter/googlecloudpubsubexporter/exporter.go index ae12c415cff1..439ec385d7ea 100644 --- a/exporter/googlecloudpubsubexporter/exporter.go +++ b/exporter/googlecloudpubsubexporter/exporter.go @@ -24,7 +24,9 @@ import ( pubsub "cloud.google.com/go/pubsub/apiv1" "github.com/google/uuid" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap" "google.golang.org/api/option" pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1" @@ -42,11 +44,11 @@ type pubsubExporter struct { ceSource string ceCompression compression config *Config - tracesMarshaler pdata.TracesMarshaler + tracesMarshaler ptrace.Marshaler tracesWatermarkFunc tracesWatermarkFunc - metricsMarshaler pdata.MetricsMarshaler + metricsMarshaler pmetric.Marshaler metricsWatermarkFunc metricsWatermarkFunc - logsMarshaler pdata.LogsMarshaler + logsMarshaler plog.Marshaler logsWatermarkFunc logsWatermarkFunc } @@ -183,7 +185,7 @@ func (ex *pubsubExporter) compress(payload []byte) ([]byte, error) { return payload, nil } -func (ex *pubsubExporter) consumeTraces(ctx context.Context, traces pdata.Traces) error { +func (ex *pubsubExporter) consumeTraces(ctx context.Context, traces ptrace.Traces) error { buffer, err := ex.tracesMarshaler.MarshalTraces(traces) if err != nil { return err @@ -191,7 +193,7 @@ func (ex *pubsubExporter) consumeTraces(ctx context.Context, traces pdata.Traces return ex.publishMessage(ctx, otlpProtoTrace, buffer, ex.tracesWatermarkFunc(traces, time.Now(), ex.config.Watermark.AllowedDrift).UTC()) } -func (ex *pubsubExporter) consumeMetrics(ctx context.Context, metrics pdata.Metrics) error { +func (ex *pubsubExporter) consumeMetrics(ctx context.Context, metrics pmetric.Metrics) error { 
buffer, err := ex.metricsMarshaler.MarshalMetrics(metrics) if err != nil { return err @@ -199,7 +201,7 @@ func (ex *pubsubExporter) consumeMetrics(ctx context.Context, metrics pdata.Metr return ex.publishMessage(ctx, otlpProtoMetric, buffer, ex.metricsWatermarkFunc(metrics, time.Now(), ex.config.Watermark.AllowedDrift).UTC()) } -func (ex *pubsubExporter) consumeLogs(ctx context.Context, logs pdata.Logs) error { +func (ex *pubsubExporter) consumeLogs(ctx context.Context, logs plog.Logs) error { buffer, err := ex.logsMarshaler.MarshalLogs(logs) if err != nil { return err diff --git a/exporter/googlecloudpubsubexporter/exporter_test.go b/exporter/googlecloudpubsubexporter/exporter_test.go index 27d096ff6949..b2db58395d37 100644 --- a/exporter/googlecloudpubsubexporter/exporter_test.go +++ b/exporter/googlecloudpubsubexporter/exporter_test.go @@ -23,7 +23,9 @@ import ( "github.com/stretchr/testify/assert" "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/exporter/exporterhelper" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" "google.golang.org/api/option" pb "google.golang.org/genproto/googleapis/pubsub/v1" ) @@ -81,9 +83,9 @@ func TestExporterDefaultSettings(t *testing.T) { } exporter := ensureExporter(componenttest.NewNopExporterCreateSettings(), exporterConfig) assert.NoError(t, exporter.start(ctx, nil)) - assert.NoError(t, exporter.consumeTraces(ctx, pdata.NewTraces())) - assert.NoError(t, exporter.consumeMetrics(ctx, pdata.NewMetrics())) - assert.NoError(t, exporter.consumeLogs(ctx, pdata.NewLogs())) + assert.NoError(t, exporter.consumeTraces(ctx, ptrace.NewTraces())) + assert.NoError(t, exporter.consumeMetrics(ctx, pmetric.NewMetrics())) + assert.NoError(t, exporter.consumeLogs(ctx, plog.NewLogs())) assert.NoError(t, exporter.shutdown(ctx)) } @@ -111,8 +113,8 @@ func TestExporterCompression(t *testing.T) { exporterConfig.Compression = "gzip" exporter := ensureExporter(componenttest.NewNopExporterCreateSettings(), exporterConfig) assert.NoError(t, exporter.start(ctx, nil)) - assert.NoError(t, exporter.consumeTraces(ctx, pdata.NewTraces())) - assert.NoError(t, exporter.consumeMetrics(ctx, pdata.NewMetrics())) - assert.NoError(t, exporter.consumeLogs(ctx, pdata.NewLogs())) + assert.NoError(t, exporter.consumeTraces(ctx, ptrace.NewTraces())) + assert.NoError(t, exporter.consumeMetrics(ctx, pmetric.NewMetrics())) + assert.NoError(t, exporter.consumeLogs(ctx, plog.NewLogs())) assert.NoError(t, exporter.shutdown(ctx)) } diff --git a/exporter/googlecloudpubsubexporter/factory.go b/exporter/googlecloudpubsubexporter/factory.go index cff578eafcd7..1a3e475e09fd 100644 --- a/exporter/googlecloudpubsubexporter/factory.go +++ b/exporter/googlecloudpubsubexporter/factory.go @@ -24,7 +24,9 @@ import ( "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/exporter/exporterhelper" - "go.opentelemetry.io/collector/model/otlp" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" ) const ( @@ -55,9 +57,9 @@ func ensureExporter(params component.ExporterCreateSettings, pCfg *Config) *pubs userAgent: strings.ReplaceAll(pCfg.UserAgent, "{{version}}", params.BuildInfo.Version), ceSource: fmt.Sprintf("/opentelemetry/collector/%s/%s", name, params.BuildInfo.Version), config: pCfg, - 
tracesMarshaler: otlp.NewProtobufTracesMarshaler(), - metricsMarshaler: otlp.NewProtobufMetricsMarshaler(), - logsMarshaler: otlp.NewProtobufLogsMarshaler(), + tracesMarshaler: ptrace.NewProtoMarshaler(), + metricsMarshaler: pmetric.NewProtoMarshaler(), + logsMarshaler: plog.NewProtoMarshaler(), } // we ignore the error here as the config is already validated with the same method receiver.ceCompression, _ = pCfg.parseCompression() diff --git a/exporter/googlecloudpubsubexporter/go.mod b/exporter/googlecloudpubsubexporter/go.mod index 6b4ba9080609..9a3de678b3ec 100644 --- a/exporter/googlecloudpubsubexporter/go.mod +++ b/exporter/googlecloudpubsubexporter/go.mod @@ -6,8 +6,8 @@ require ( cloud.google.com/go/pubsub v1.19.0 github.com/google/uuid v1.3.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/zap v1.21.0 google.golang.org/api v0.74.0 google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb @@ -18,7 +18,7 @@ require ( cloud.google.com/go v0.100.2 // indirect cloud.google.com/go/compute v1.5.0 // indirect cloud.google.com/go/iam v0.3.0 // indirect - github.com/cenkalti/backoff/v4 v4.1.2 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/fsnotify/fsnotify v1.5.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect @@ -26,7 +26,7 @@ require ( github.com/golang/protobuf v1.5.2 // indirect github.com/google/go-cmp v0.5.7 // indirect github.com/googleapis/gax-go/v2 v2.2.0 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/pretty v0.3.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect @@ -34,7 +34,6 @@ require ( github.com/pelletier/go-toml v1.9.4 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect @@ -50,3 +49,5 @@ require ( gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/exporter/googlecloudpubsubexporter/go.sum b/exporter/googlecloudpubsubexporter/go.sum index 39c79252581c..e8a0ad89baf4 100644 --- a/exporter/googlecloudpubsubexporter/go.sum +++ b/exporter/googlecloudpubsubexporter/go.sum @@ -76,8 +76,8 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= -github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= 
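As an illustration (not part of the patch itself): the exporter and factory hunks above replace the old otlp.NewProtobuf*Marshaler constructors and pdata signal types with the per-signal ptrace, pmetric and plog packages, whose MarshalTraces/MarshalMetrics/MarshalLogs methods feed publishMessage. A minimal standalone sketch of that usage, assuming the pinned collector/pdata pseudo-version from the replace directives in this patch:

package main

import (
	"fmt"
	"log"

	"go.opentelemetry.io/collector/pdata/plog"
	"go.opentelemetry.io/collector/pdata/pmetric"
	"go.opentelemetry.io/collector/pdata/ptrace"
)

func main() {
	// Per-signal proto marshalers, replacing otlp.NewProtobufTracesMarshaler and friends.
	tracesMarshaler := ptrace.NewProtoMarshaler()
	metricsMarshaler := pmetric.NewProtoMarshaler()
	logsMarshaler := plog.NewProtoMarshaler()

	// Empty payloads, as in the exporter tests elsewhere in this patch.
	tb, err := tracesMarshaler.MarshalTraces(ptrace.NewTraces())
	if err != nil {
		log.Fatal(err)
	}
	mb, err := metricsMarshaler.MarshalMetrics(pmetric.NewMetrics())
	if err != nil {
		log.Fatal(err)
	}
	lb, err := logsMarshaler.MarshalLogs(plog.NewLogs())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(tb), len(mb), len(lb)) // serialized OTLP protobuf byte counts
}

In the real exporter these marshalers are wired into the pubsubExporter fields and the resulting bytes are compressed and published; the sketch only shows the marshal step.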
+github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -205,7 +205,6 @@ github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pf github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= github.com/googleapis/gax-go/v2 v2.2.0 h1:s7jOdKSaksJVOxE0Y/S32otcfiP+UQ0cL8/GTKaONwE= github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -240,8 +239,8 @@ github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/X github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= @@ -290,8 +289,6 @@ github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -316,17 +313,17 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod 
h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= diff --git a/exporter/googlecloudpubsubexporter/watermark.go b/exporter/googlecloudpubsubexporter/watermark.go index 62ced899bff6..6a48cb847459 100644 --- a/exporter/googlecloudpubsubexporter/watermark.go +++ b/exporter/googlecloudpubsubexporter/watermark.go @@ -17,14 +17,17 @@ package googlecloudpubsubexporter // import "github.com/open-telemetry/opentelem import ( "time" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" ) -type metricsWatermarkFunc func(metrics pdata.Metrics, processingTime time.Time, allowedDrift time.Duration) time.Time -type logsWatermarkFunc func(logs pdata.Logs, processingTime time.Time, allowedDrift time.Duration) time.Time -type tracesWatermarkFunc func(traces pdata.Traces, processingTime time.Time, allowedDrift time.Duration) time.Time +type metricsWatermarkFunc func(metrics pmetric.Metrics, processingTime time.Time, allowedDrift time.Duration) time.Time +type logsWatermarkFunc func(logs plog.Logs, processingTime time.Time, allowedDrift time.Duration) time.Time +type tracesWatermarkFunc func(traces ptrace.Traces, processingTime time.Time, allowedDrift time.Duration) time.Time -type collectFunc func(timestamp pdata.Timestamp) bool +type collectFunc func(timestamp pcommon.Timestamp) bool // collector helps traverse the OTLP tree to calculate the final time to set to the ce-time attribute type collector struct { @@ -38,7 +41,7 @@ type collector struct { // add a 
new timestamp, and set the calculated time if it's earlier then the current calculated, // taking into account the allowedDrift -func (c *collector) earliest(timestamp pdata.Timestamp) bool { +func (c *collector) earliest(timestamp pcommon.Timestamp) bool { t := timestamp.AsTime() if t.Before(c.calculatedTime) { min := c.processingTime.Add(-c.allowedDrift) @@ -52,12 +55,12 @@ func (c *collector) earliest(timestamp pdata.Timestamp) bool { } // function that doesn't traverse the metric data, return the processingTime -func currentMetricsWatermark(_ pdata.Metrics, processingTime time.Time, _ time.Duration) time.Time { +func currentMetricsWatermark(_ pmetric.Metrics, processingTime time.Time, _ time.Duration) time.Time { return processingTime } // function that traverse the metric data, and returns the earliest timestamp (within limits of the allowedDrift) -func earliestMetricsWatermark(metrics pdata.Metrics, processingTime time.Time, allowedDrift time.Duration) time.Time { +func earliestMetricsWatermark(metrics pmetric.Metrics, processingTime time.Time, allowedDrift time.Duration) time.Time { collector := &collector{ processingTime: processingTime, allowedDrift: allowedDrift, @@ -68,7 +71,7 @@ func earliestMetricsWatermark(metrics pdata.Metrics, processingTime time.Time, a } // traverse the metric data, with a collectFunc -func traverseMetrics(metrics pdata.Metrics, collect collectFunc) { +func traverseMetrics(metrics pmetric.Metrics, collect collectFunc) { for rix := 0; rix < metrics.ResourceMetrics().Len(); rix++ { r := metrics.ResourceMetrics().At(rix) for lix := 0; lix < r.ScopeMetrics().Len(); lix++ { @@ -76,35 +79,35 @@ func traverseMetrics(metrics pdata.Metrics, collect collectFunc) { for dix := 0; dix < l.Metrics().Len(); dix++ { d := l.Metrics().At(dix) switch d.DataType() { - case pdata.MetricDataTypeHistogram: + case pmetric.MetricDataTypeHistogram: for pix := 0; pix < d.Histogram().DataPoints().Len(); pix++ { p := d.Histogram().DataPoints().At(pix) if collect(p.Timestamp()) { return } } - case pdata.MetricDataTypeExponentialHistogram: + case pmetric.MetricDataTypeExponentialHistogram: for pix := 0; pix < d.ExponentialHistogram().DataPoints().Len(); pix++ { p := d.ExponentialHistogram().DataPoints().At(pix) if collect(p.Timestamp()) { return } } - case pdata.MetricDataTypeSum: + case pmetric.MetricDataTypeSum: for pix := 0; pix < d.Sum().DataPoints().Len(); pix++ { p := d.Sum().DataPoints().At(pix) if collect(p.Timestamp()) { return } } - case pdata.MetricDataTypeGauge: + case pmetric.MetricDataTypeGauge: for pix := 0; pix < d.Gauge().DataPoints().Len(); pix++ { p := d.Gauge().DataPoints().At(pix) if collect(p.Timestamp()) { return } } - case pdata.MetricDataTypeSummary: + case pmetric.MetricDataTypeSummary: for pix := 0; pix < d.Summary().DataPoints().Len(); pix++ { p := d.Summary().DataPoints().At(pix) if collect(p.Timestamp()) { @@ -118,12 +121,12 @@ func traverseMetrics(metrics pdata.Metrics, collect collectFunc) { } // function that doesn't traverse the log data, return the processingTime -func currentLogsWatermark(_ pdata.Logs, processingTime time.Time, _ time.Duration) time.Time { +func currentLogsWatermark(_ plog.Logs, processingTime time.Time, _ time.Duration) time.Time { return processingTime } // function that traverse the log data, and returns the earliest timestamp (within limits of the allowedDrift) -func earliestLogsWatermark(logs pdata.Logs, processingTime time.Time, allowedDrift time.Duration) time.Time { +func earliestLogsWatermark(logs plog.Logs, processingTime 
time.Time, allowedDrift time.Duration) time.Time { c := collector{ processingTime: processingTime, allowedDrift: allowedDrift, @@ -134,7 +137,7 @@ func earliestLogsWatermark(logs pdata.Logs, processingTime time.Time, allowedDri } // traverse the log data, with a collectFunc -func traverseLogs(logs pdata.Logs, collect collectFunc) { +func traverseLogs(logs plog.Logs, collect collectFunc) { for rix := 0; rix < logs.ResourceLogs().Len(); rix++ { r := logs.ResourceLogs().At(rix) for lix := 0; lix < r.ScopeLogs().Len(); lix++ { @@ -150,12 +153,12 @@ func traverseLogs(logs pdata.Logs, collect collectFunc) { } // function that doesn't traverse the trace data, return the processingTime -func currentTracesWatermark(_ pdata.Traces, processingTime time.Time, _ time.Duration) time.Time { +func currentTracesWatermark(_ ptrace.Traces, processingTime time.Time, _ time.Duration) time.Time { return processingTime } // function that traverse the trace data, and returns the earliest timestamp (within limits of the allowedDrift) -func earliestTracesWatermark(traces pdata.Traces, processingTime time.Time, allowedDrift time.Duration) time.Time { +func earliestTracesWatermark(traces ptrace.Traces, processingTime time.Time, allowedDrift time.Duration) time.Time { c := collector{ processingTime: processingTime, allowedDrift: allowedDrift, @@ -166,7 +169,7 @@ func earliestTracesWatermark(traces pdata.Traces, processingTime time.Time, allo } // traverse the trace data, with a collectFunc -func traverseTraces(traces pdata.Traces, collect collectFunc) { +func traverseTraces(traces ptrace.Traces, collect collectFunc) { for rix := 0; rix < traces.ResourceSpans().Len(); rix++ { r := traces.ResourceSpans().At(rix) for lix := 0; lix < r.ScopeSpans().Len(); lix++ { diff --git a/exporter/googlecloudpubsubexporter/watermark_test.go b/exporter/googlecloudpubsubexporter/watermark_test.go index 20c453b92a7b..e622ea523f0a 100644 --- a/exporter/googlecloudpubsubexporter/watermark_test.go +++ b/exporter/googlecloudpubsubexporter/watermark_test.go @@ -19,7 +19,10 @@ import ( "time" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" ) var ( @@ -31,45 +34,45 @@ var ( tsAfter5m = tsRef.Add(5 * time.Minute) ) -var metricsData = func() pdata.Metrics { - d := pdata.NewMetrics() +var metricsData = func() pmetric.Metrics { + d := pmetric.NewMetrics() metric := d.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics().AppendEmpty() - metric.SetDataType(pdata.MetricDataTypeHistogram) - metric.Histogram().DataPoints().AppendEmpty().SetTimestamp(pdata.NewTimestampFromTime(tsAfter30s)) + metric.SetDataType(pmetric.MetricDataTypeHistogram) + metric.Histogram().DataPoints().AppendEmpty().SetTimestamp(pcommon.NewTimestampFromTime(tsAfter30s)) metric = d.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics().AppendEmpty() - metric.SetDataType(pdata.MetricDataTypeSummary) - metric.Summary().DataPoints().AppendEmpty().SetTimestamp(pdata.NewTimestampFromTime(tsAfter5m)) + metric.SetDataType(pmetric.MetricDataTypeSummary) + metric.Summary().DataPoints().AppendEmpty().SetTimestamp(pcommon.NewTimestampFromTime(tsAfter5m)) metric = d.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics().AppendEmpty() - metric.SetDataType(pdata.MetricDataTypeGauge) - 
metric.Gauge().DataPoints().AppendEmpty().SetTimestamp(pdata.NewTimestampFromTime(tsRef)) + metric.SetDataType(pmetric.MetricDataTypeGauge) + metric.Gauge().DataPoints().AppendEmpty().SetTimestamp(pcommon.NewTimestampFromTime(tsRef)) metric = d.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics().AppendEmpty() - metric.SetDataType(pdata.MetricDataTypeSum) - metric.Sum().DataPoints().AppendEmpty().SetTimestamp(pdata.NewTimestampFromTime(tsBefore30s)) + metric.SetDataType(pmetric.MetricDataTypeSum) + metric.Sum().DataPoints().AppendEmpty().SetTimestamp(pcommon.NewTimestampFromTime(tsBefore30s)) metric = d.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics().AppendEmpty() - metric.SetDataType(pdata.MetricDataTypeExponentialHistogram) - metric.ExponentialHistogram().DataPoints().AppendEmpty().SetTimestamp(pdata.NewTimestampFromTime(tsBefore5m)) + metric.SetDataType(pmetric.MetricDataTypeExponentialHistogram) + metric.ExponentialHistogram().DataPoints().AppendEmpty().SetTimestamp(pcommon.NewTimestampFromTime(tsBefore5m)) return d }() -var tracesData = func() pdata.Traces { - d := pdata.NewTraces() +var tracesData = func() ptrace.Traces { + d := ptrace.NewTraces() span := d.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty().Spans().AppendEmpty() - span.SetStartTimestamp(pdata.NewTimestampFromTime(tsRef)) + span.SetStartTimestamp(pcommon.NewTimestampFromTime(tsRef)) span = d.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty().Spans().AppendEmpty() - span.SetStartTimestamp(pdata.NewTimestampFromTime(tsBefore30s)) + span.SetStartTimestamp(pcommon.NewTimestampFromTime(tsBefore30s)) span = d.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty().Spans().AppendEmpty() - span.SetStartTimestamp(pdata.NewTimestampFromTime(tsBefore5m)) + span.SetStartTimestamp(pcommon.NewTimestampFromTime(tsBefore5m)) return d }() -var logsData = func() pdata.Logs { - d := pdata.NewLogs() +var logsData = func() plog.Logs { + d := plog.NewLogs() log := d.ResourceLogs().AppendEmpty().ScopeLogs().AppendEmpty().LogRecords().AppendEmpty() - log.SetTimestamp(pdata.NewTimestampFromTime(tsRef)) + log.SetTimestamp(pcommon.NewTimestampFromTime(tsRef)) log = d.ResourceLogs().AppendEmpty().ScopeLogs().AppendEmpty().LogRecords().AppendEmpty() - log.SetTimestamp(pdata.NewTimestampFromTime(tsBefore30s)) + log.SetTimestamp(pcommon.NewTimestampFromTime(tsBefore30s)) log = d.ResourceLogs().AppendEmpty().ScopeLogs().AppendEmpty().LogRecords().AppendEmpty() - log.SetTimestamp(pdata.NewTimestampFromTime(tsBefore5m)) + log.SetTimestamp(pcommon.NewTimestampFromTime(tsBefore5m)) return d }() diff --git a/exporter/honeycombexporter/go.mod b/exporter/honeycombexporter/go.mod index 3198dcc78391..278d5b2e29a3 100644 --- a/exporter/honeycombexporter/go.mod +++ b/exporter/honeycombexporter/go.mod @@ -7,23 +7,22 @@ require ( github.com/honeycombio/libhoney-go v1.15.8 github.com/klauspost/compress v1.15.1 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/multierr v1.8.0 go.uber.org/zap v1.21.0 google.golang.org/protobuf v1.28.0 ) require ( - github.com/cenkalti/backoff/v4 v4.1.2 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/facebookgo/clock 
v0.0.0-20150410010913-600d898af40a // indirect github.com/facebookgo/limitgroup v0.0.0-20150612190941-6abd8d71ec01 // indirect github.com/facebookgo/muster v0.0.0-20150708232844-fd3d7953fd52 // indirect github.com/fsnotify/fsnotify v1.5.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/protobuf v1.5.2 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/pretty v0.3.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect @@ -31,7 +30,6 @@ require ( github.com/pelletier/go-toml v1.9.4 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spf13/cast v1.4.1 // indirect github.com/vmihailenco/msgpack/v5 v5.3.5 // indirect github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect go.opencensus.io v0.23.0 // indirect @@ -39,13 +37,10 @@ require ( go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect - golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect - golang.org/x/text v0.3.7 // indirect - google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect - google.golang.org/grpc v1.45.0 // indirect gopkg.in/alexcesaro/statsd.v2 v2.0.0 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/exporter/honeycombexporter/go.sum b/exporter/honeycombexporter/go.sum index 68d701c2af16..7be3e69737ca 100644 --- a/exporter/honeycombexporter/go.sum +++ b/exporter/honeycombexporter/go.sum @@ -1,10 +1,7 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/DataDog/zstd v1.5.0 h1:+K/VEwIAaPcHiMtQvpLD4lqW7f0Gk3xdYZmI1hD+CXo= github.com/DataDog/zstd v1.5.0/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= @@ -20,19 +17,11 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 
h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= -github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -40,9 +29,6 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw= github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a/go.mod h1:7Ga40egUymuWXxAe151lTNnCv97MddSOVsjpPPkityA= @@ -62,7 +48,6 @@ github.com/felixge/httpsnoop v1.0.2 h1:+nS9g82KMXccJ/wp0zyRW9ZBHFETmMGtkk+2CTTrW github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= 
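As an illustration (not part of the patch itself): the watermark.go and watermark_test.go hunks a few files above now build metrics through pmetric and read timestamps as pcommon.Timestamp. A small sketch of that construct-and-read-back pattern, using an illustrative timestamp rather than the tsRef fixtures from the tests:

package main

import (
	"fmt"
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	// Illustrative reference time; the tests above use their own tsRef fixtures.
	ts := time.Date(2022, 4, 12, 0, 0, 0, 0, time.UTC)

	// Build one gauge data point the way watermark_test.go does.
	m := pmetric.NewMetrics()
	metric := m.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics().AppendEmpty()
	metric.SetDataType(pmetric.MetricDataTypeGauge)
	metric.Gauge().DataPoints().AppendEmpty().SetTimestamp(pcommon.NewTimestampFromTime(ts))

	// Read the timestamp back the way traverseMetrics handles the gauge case.
	dp := m.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Gauge().DataPoints().At(0)
	fmt.Println(dp.Timestamp().AsTime().Equal(ts)) // true
}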
-github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= @@ -79,18 +64,15 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -105,8 +87,6 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -140,8 +120,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= github.com/klauspost/compress v1.15.1/go.mod 
h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= @@ -183,21 +163,16 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rhnvrm/simples3 v0.6.1/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= @@ -211,20 +186,19 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod 
h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -248,20 +222,16 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f 
h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -277,22 +247,18 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -313,22 +279,16 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod 
h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -339,8 +299,6 @@ google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alexcesaro/statsd.v2 v2.0.0 h1:FXkZSCZIH17vLCO5sO2UucTHsH9pc+17F6pl3JVCwMc= @@ -352,8 +310,6 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= diff --git a/exporter/honeycombexporter/honeycomb.go b/exporter/honeycombexporter/honeycomb.go index 381400fec6b5..ee6287be55a5 100644 --- 
a/exporter/honeycombexporter/honeycomb.go +++ b/exporter/honeycombexporter/honeycomb.go @@ -20,7 +20,7 @@ import ( "github.com/honeycombio/libhoney-go" "github.com/honeycombio/libhoney-go/transmission" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/multierr" "go.uber.org/zap" ) @@ -101,7 +101,7 @@ func newHoneycombTracesExporter(cfg *Config, logger *zap.Logger) (*honeycombExpo // pushTraceData is the method called when trace data is available. It will be // responsible for sending a batch of events. -func (e *honeycombExporter) pushTraceData(ctx context.Context, td pdata.Traces) error { +func (e *honeycombExporter) pushTraceData(ctx context.Context, td ptrace.Traces) error { var errs error // Run the error logger. This just listens for messages in the error @@ -172,19 +172,19 @@ func (e *honeycombExporter) pushTraceData(ctx context.Context, td pdata.Traces) return errs } -func getSpanKind(kind pdata.SpanKind) string { +func getSpanKind(kind ptrace.SpanKind) string { switch kind { - case pdata.SpanKindClient: + case ptrace.SpanKindClient: return "client" - case pdata.SpanKindServer: + case ptrace.SpanKindServer: return "server" - case pdata.SpanKindProducer: + case ptrace.SpanKindProducer: return "producer" - case pdata.SpanKindConsumer: + case ptrace.SpanKindConsumer: return "consumer" - case pdata.SpanKindInternal: + case ptrace.SpanKindInternal: return "internal" - case pdata.SpanKindUnspecified: + case ptrace.SpanKindUnspecified: fallthrough default: return "unspecified" @@ -193,7 +193,7 @@ func getSpanKind(kind pdata.SpanKind) string { // sendSpanLinks gets the list of links associated with this span and sends them as // separate events to Honeycomb, with a span type "link". -func (e *honeycombExporter) sendSpanLinks(span pdata.Span) { +func (e *honeycombExporter) sendSpanLinks(span ptrace.Span) { links := span.Links() for i := 0; i < links.Len(); i++ { @@ -221,7 +221,7 @@ func (e *honeycombExporter) sendSpanLinks(span pdata.Span) { // sendMessageEvents gets the list of timeevents from the span and sends them as // separate events to Honeycomb, with a span type "span_event". -func (e *honeycombExporter) sendMessageEvents(span pdata.Span, resourceAttrs map[string]interface{}) { +func (e *honeycombExporter) sendMessageEvents(span ptrace.Span, resourceAttrs map[string]interface{}) { timeEvents := span.Events() for i := 0; i < timeEvents.Len(); i++ { diff --git a/exporter/honeycombexporter/honeycomb_test.go b/exporter/honeycombexporter/honeycomb_test.go index dea92d02747d..59982708bb15 100644 --- a/exporter/honeycombexporter/honeycomb_test.go +++ b/exporter/honeycombexporter/honeycomb_test.go @@ -31,7 +31,8 @@ import ( "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/config" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap" "go.uber.org/zap/zapcore" "go.uber.org/zap/zaptest/observer" @@ -66,7 +67,7 @@ func testingServer(callback func(data []honeycombData)) *httptest.Server { })) } -func testTracesExporter(td pdata.Traces, t *testing.T, cfg *Config) []honeycombData { +func testTracesExporter(td ptrace.Traces, t *testing.T, cfg *Config) []honeycombData { var got []honeycombData server := testingServer(func(data []honeycombData) { got = append(got, data...) 
@@ -98,7 +99,7 @@ func baseConfig() *Config { } func TestExporter(t *testing.T) { - td := pdata.NewTraces() + td := ptrace.NewTraces() rs := td.ResourceSpans().AppendEmpty() rs.Resource().Attributes().InsertString("service.name", "test_service") rs.Resource().Attributes().InsertString("A", "B") @@ -109,28 +110,28 @@ func TestExporter(t *testing.T) { lib.SetVersion("1.0.0") clientSpan := instrLibrarySpans.Spans().AppendEmpty() - clientSpan.SetTraceID(pdata.NewTraceID([16]byte{0x01})) - clientSpan.SetSpanID(pdata.NewSpanID([8]byte{0x03})) - clientSpan.SetParentSpanID(pdata.NewSpanID([8]byte{0x02})) + clientSpan.SetTraceID(pcommon.NewTraceID([16]byte{0x01})) + clientSpan.SetSpanID(pcommon.NewSpanID([8]byte{0x03})) + clientSpan.SetParentSpanID(pcommon.NewSpanID([8]byte{0x02})) clientSpan.SetName("client") - clientSpan.SetKind(pdata.SpanKindClient) + clientSpan.SetKind(ptrace.SpanKindClient) clientSpanLink := clientSpan.Links().AppendEmpty() - clientSpanLink.SetTraceID(pdata.NewTraceID([16]byte{0x04})) - clientSpanLink.SetSpanID(pdata.NewSpanID([8]byte{0x05})) + clientSpanLink.SetTraceID(pcommon.NewTraceID([16]byte{0x04})) + clientSpanLink.SetSpanID(pcommon.NewSpanID([8]byte{0x05})) clientSpanLink.Attributes().InsertInt("span_link_attr", 12345) serverSpan := instrLibrarySpans.Spans().AppendEmpty() - serverSpan.SetTraceID(pdata.NewTraceID([16]byte{0x01})) - serverSpan.SetSpanID(pdata.NewSpanID([8]byte{0x04})) - serverSpan.SetParentSpanID(pdata.NewSpanID([8]byte{0x03})) + serverSpan.SetTraceID(pcommon.NewTraceID([16]byte{0x01})) + serverSpan.SetSpanID(pcommon.NewSpanID([8]byte{0x04})) + serverSpan.SetParentSpanID(pcommon.NewSpanID([8]byte{0x03})) serverSpan.SetName("server") - serverSpan.SetKind(pdata.SpanKindServer) + serverSpan.SetKind(ptrace.SpanKindServer) rootSpan := instrLibrarySpans.Spans().AppendEmpty() - rootSpan.SetTraceID(pdata.NewTraceID([16]byte{0x01})) - rootSpan.SetSpanID(pdata.NewSpanID([8]byte{0x02})) + rootSpan.SetTraceID(pcommon.NewTraceID([16]byte{0x01})) + rootSpan.SetSpanID(pcommon.NewSpanID([8]byte{0x02})) rootSpan.SetName("root") - rootSpan.SetKind(pdata.SpanKindServer) + rootSpan.SetKind(ptrace.SpanKindServer) rootSpan.Attributes().InsertString("span_attr_name", "Span Attribute") rootSpan.Attributes().InsertString("B", "D") rootSpanEvent := rootSpan.Events().AppendEmpty() @@ -223,7 +224,7 @@ func TestExporter(t *testing.T) { } func TestSpanKinds(t *testing.T) { - td := pdata.NewTraces() + td := ptrace.NewTraces() rs := td.ResourceSpans().AppendEmpty() rs.Resource().Attributes().InsertString("service.name", "test_service") instrLibrarySpans := rs.ScopeSpans().AppendEmpty() @@ -233,14 +234,14 @@ func TestSpanKinds(t *testing.T) { initSpan(instrLibrarySpans.Spans().AppendEmpty()) - spanKinds := []pdata.SpanKind{ - pdata.SpanKindInternal, - pdata.SpanKindClient, - pdata.SpanKindServer, - pdata.SpanKindProducer, - pdata.SpanKindConsumer, - pdata.SpanKindUnspecified, - pdata.SpanKind(1000), + spanKinds := []ptrace.SpanKind{ + ptrace.SpanKindInternal, + ptrace.SpanKindClient, + ptrace.SpanKindServer, + ptrace.SpanKindProducer, + ptrace.SpanKindConsumer, + ptrace.SpanKindUnspecified, + ptrace.SpanKind(1000), } expectedStrings := []string{ @@ -286,39 +287,39 @@ func TestSpanKinds(t *testing.T) { } } -func initSpan(span pdata.Span) { +func initSpan(span ptrace.Span) { span.SetName("spanName") - span.SetTraceID(pdata.NewTraceID([16]byte{0x01})) - span.SetParentSpanID(pdata.NewSpanID([8]byte{0x02})) - span.SetSpanID(pdata.NewSpanID([8]byte{0x03})) + 
span.SetTraceID(pcommon.NewTraceID([16]byte{0x01})) + span.SetParentSpanID(pcommon.NewSpanID([8]byte{0x02})) + span.SetSpanID(pcommon.NewSpanID([8]byte{0x03})) span.Attributes().InsertString("span_attr_name", "Span Attribute") } func TestSampleRateAttribute(t *testing.T) { - td := pdata.NewTraces() + td := ptrace.NewTraces() rs := td.ResourceSpans().AppendEmpty() instrLibrarySpans := rs.ScopeSpans().AppendEmpty() intSampleRateSpan := instrLibrarySpans.Spans().AppendEmpty() - intSampleRateSpan.SetTraceID(pdata.NewTraceID([16]byte{0x01})) - intSampleRateSpan.SetSpanID(pdata.NewSpanID([8]byte{0x02})) + intSampleRateSpan.SetTraceID(pcommon.NewTraceID([16]byte{0x01})) + intSampleRateSpan.SetSpanID(pcommon.NewSpanID([8]byte{0x02})) intSampleRateSpan.SetName("root") - intSampleRateSpan.SetKind(pdata.SpanKindServer) + intSampleRateSpan.SetKind(ptrace.SpanKindServer) intSampleRateSpan.Attributes().InsertString("some_attribute", "A value") intSampleRateSpan.Attributes().InsertInt("hc.sample.rate", 13) noSampleRateSpan := instrLibrarySpans.Spans().AppendEmpty() - noSampleRateSpan.SetTraceID(pdata.NewTraceID([16]byte{0x01})) - noSampleRateSpan.SetSpanID(pdata.NewSpanID([8]byte{0x02})) + noSampleRateSpan.SetTraceID(pcommon.NewTraceID([16]byte{0x01})) + noSampleRateSpan.SetSpanID(pcommon.NewSpanID([8]byte{0x02})) noSampleRateSpan.SetName("root") - noSampleRateSpan.SetKind(pdata.SpanKindServer) + noSampleRateSpan.SetKind(ptrace.SpanKindServer) noSampleRateSpan.Attributes().InsertString("no_sample_rate", "gets_default") invalidSampleRateSpan := instrLibrarySpans.Spans().AppendEmpty() - invalidSampleRateSpan.SetTraceID(pdata.NewTraceID([16]byte{0x01})) - invalidSampleRateSpan.SetSpanID(pdata.NewSpanID([8]byte{0x02})) + invalidSampleRateSpan.SetTraceID(pcommon.NewTraceID([16]byte{0x01})) + invalidSampleRateSpan.SetSpanID(pcommon.NewSpanID([8]byte{0x02})) invalidSampleRateSpan.SetName("root") - invalidSampleRateSpan.SetKind(pdata.SpanKindServer) + invalidSampleRateSpan.SetKind(ptrace.SpanKindServer) invalidSampleRateSpan.Attributes().InsertString("hc.sample.rate", "wrong_type") cfg := baseConfig() diff --git a/exporter/honeycombexporter/ids.go b/exporter/honeycombexporter/ids.go index f291b49d78a0..d860a6b356c1 100644 --- a/exporter/honeycombexporter/ids.go +++ b/exporter/honeycombexporter/ids.go @@ -18,7 +18,7 @@ import ( "encoding/binary" "fmt" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" ) const ( @@ -34,7 +34,7 @@ const ( // one. // // [1]: https://github.com/jaegertracing/jaeger/blob/cd19b64413eca0f06b61d92fe29bebce1321d0b0/model/ids.go#L81 -func getHoneycombTraceID(traceID pdata.TraceID) string { +func getHoneycombTraceID(traceID pcommon.TraceID) string { // binary.BigEndian.Uint64() does a bounds check on traceID which will // cause a panic if traceID is fewer than 8 bytes. In this case, we don't // need to check for zero padding on the high part anyway, so just return a @@ -52,7 +52,7 @@ func getHoneycombTraceID(traceID pdata.TraceID) string { } // getHoneycombSpanID just takes a byte array and hex encodes it. 
-func getHoneycombSpanID(id pdata.SpanID) string { +func getHoneycombSpanID(id pcommon.SpanID) string { if !id.IsEmpty() { return fmt.Sprintf("%x", id.Bytes()) } diff --git a/exporter/honeycombexporter/ids_test.go b/exporter/honeycombexporter/ids_test.go index 7639e367370e..53166bb44f8f 100644 --- a/exporter/honeycombexporter/ids_test.go +++ b/exporter/honeycombexporter/ids_test.go @@ -18,23 +18,23 @@ import ( "reflect" "testing" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" ) func TestGetHoneycombTraceID(t *testing.T) { tests := []struct { name string - traceID pdata.TraceID + traceID pcommon.TraceID want string }{ { name: "128-bit zero-padded traceID", - traceID: pdata.NewTraceID([16]byte{0, 0, 0, 0, 0, 0, 0, 0, 203, 228, 222, 205, 18, 66, 145, 119}), + traceID: pcommon.NewTraceID([16]byte{0, 0, 0, 0, 0, 0, 0, 0, 203, 228, 222, 205, 18, 66, 145, 119}), want: "cbe4decd12429177", }, { name: "128-bit non-zero-padded traceID", - traceID: pdata.NewTraceID([16]byte{242, 59, 66, 234, 194, 137, 160, 253, 205, 228, 143, 203, 227, 171, 26, 50}), + traceID: pcommon.NewTraceID([16]byte{242, 59, 66, 234, 194, 137, 160, 253, 205, 228, 143, 203, 227, 171, 26, 50}), want: "f23b42eac289a0fdcde48fcbe3ab1a32", }, } diff --git a/exporter/honeycombexporter/translator.go b/exporter/honeycombexporter/translator.go index 793c0d95fdc1..5531eeb1408b 100644 --- a/exporter/honeycombexporter/translator.go +++ b/exporter/honeycombexporter/translator.go @@ -17,23 +17,24 @@ package honeycombexporter // import "github.com/open-telemetry/opentelemetry-col import ( "time" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" ) // spanAttributesToMap converts an opencensus proto Span_Attributes object into a map // of strings to generic types usable for sending events to honeycomb. -func spanAttributesToMap(spanAttrs pdata.Map) map[string]interface{} { +func spanAttributesToMap(spanAttrs pcommon.Map) map[string]interface{} { var attrs = make(map[string]interface{}, spanAttrs.Len()) - spanAttrs.Range(func(key string, value pdata.Value) bool { + spanAttrs.Range(func(key string, value pcommon.Value) bool { switch value.Type() { - case pdata.ValueTypeString: + case pcommon.ValueTypeString: attrs[key] = value.StringVal() - case pdata.ValueTypeBool: + case pcommon.ValueTypeBool: attrs[key] = value.BoolVal() - case pdata.ValueTypeInt: + case pcommon.ValueTypeInt: attrs[key] = value.IntVal() - case pdata.ValueTypeDouble: + case pcommon.ValueTypeDouble: attrs[key] = value.DoubleVal() } return true @@ -43,7 +44,7 @@ func spanAttributesToMap(spanAttrs pdata.Map) map[string]interface{} { } // timestampToTime converts a protobuf timestamp into a time.Time. 
-func timestampToTime(ts pdata.Timestamp) (t time.Time) { +func timestampToTime(ts pcommon.Timestamp) (t time.Time) { if ts == 0 { return } @@ -51,12 +52,12 @@ } // getStatusCode returns the status code -func getStatusCode(status pdata.SpanStatus) int32 { +func getStatusCode(status ptrace.SpanStatus) int32 { return int32(status.Code()) } // getStatusMessage returns the status message as a string -func getStatusMessage(status pdata.SpanStatus) string { +func getStatusMessage(status ptrace.SpanStatus) string { if len(status.Message()) > 0 { return status.Message() } diff --git a/exporter/honeycombexporter/translator_test.go b/exporter/honeycombexporter/translator_test.go index 3263d2276499..fd50cbbe5e2b 100644 --- a/exporter/honeycombexporter/translator_test.go +++ b/exporter/honeycombexporter/translator_test.go @@ -19,26 +19,27 @@ import ( "time" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "google.golang.org/protobuf/types/known/timestamppb" ) func TestSpanAttributesToMap(t *testing.T) { - spanAttrs := []pdata.Map{ - pdata.NewMapFromRaw(map[string]interface{}{ + spanAttrs := []pcommon.Map{ + pcommon.NewMapFromRaw(map[string]interface{}{ "foo": "bar", }), - pdata.NewMapFromRaw(map[string]interface{}{ + pcommon.NewMapFromRaw(map[string]interface{}{ "foo": 1234, }), - pdata.NewMapFromRaw(map[string]interface{}{ + pcommon.NewMapFromRaw(map[string]interface{}{ "foo": true, }), - pdata.NewMapFromRaw(map[string]interface{}{ + pcommon.NewMapFromRaw(map[string]interface{}{ "foo": 0.3145, }), - pdata.NewMap(), + pcommon.NewMap(), } wantResults := []map[string]interface{}{ @@ -63,14 +64,14 @@ func TestSpanAttributesToMap(t *testing.T) { func TestTimestampToTime(t *testing.T) { var t1 time.Time - emptyTime := timestampToTime(pdata.Timestamp(0)) + emptyTime := timestampToTime(pcommon.Timestamp(0)) if t1 != emptyTime { t.Errorf("Expected %+v, Got: %+v\n", t1, emptyTime) } t2 := time.Now() seconds := t2.UnixNano() / 1000000000 - nowTime := timestampToTime(pdata.NewTimestampFromTime( + nowTime := timestampToTime(pcommon.NewTimestampFromTime( (&timestamppb.Timestamp{ Seconds: seconds, Nanos: int32(t2.UnixNano() - (seconds * 1000000000)), @@ -82,18 +83,18 @@ } func TestStatusCode(t *testing.T) { - status := pdata.NewSpanStatus() - assert.Equal(t, int32(pdata.StatusCodeUnset), getStatusCode(status), "empty") + status := ptrace.NewSpanStatus() + assert.Equal(t, int32(ptrace.StatusCodeUnset), getStatusCode(status), "empty") - status.SetCode(pdata.StatusCodeError) - assert.Equal(t, int32(pdata.StatusCodeError), getStatusCode(status), "error") + status.SetCode(ptrace.StatusCodeError) + assert.Equal(t, int32(ptrace.StatusCodeError), getStatusCode(status), "error") - status.SetCode(pdata.StatusCodeOk) - assert.Equal(t, int32(pdata.StatusCodeOk), getStatusCode(status), "ok") + status.SetCode(ptrace.StatusCodeOk) + assert.Equal(t, int32(ptrace.StatusCodeOk), getStatusCode(status), "ok") } func TestStatusMessage(t *testing.T) { - status := pdata.NewSpanStatus() + status := ptrace.NewSpanStatus() assert.Equal(t, "STATUS_CODE_UNSET", getStatusMessage(status), "empty") status.SetMessage("custom message") diff --git a/exporter/humioexporter/go.mod b/exporter/humioexporter/go.mod index 690584d7120e..17886c57eb78 100644 --- a/exporter/humioexporter/go.mod +++ b/exporter/humioexporter/go.mod @@ 
-4,13 +4,14 @@ go 1.17 require ( github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 ) require ( - github.com/cenkalti/backoff/v4 v4.1.2 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/felixge/httpsnoop v1.0.2 // indirect github.com/fsnotify/fsnotify v1.5.1 // indirect @@ -20,7 +21,7 @@ require ( github.com/golang/protobuf v1.5.2 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/klauspost/compress v1.15.1 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/pretty v0.3.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect @@ -29,7 +30,6 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rs/cors v1.8.2 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect @@ -38,13 +38,13 @@ require ( go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect go.uber.org/zap v1.21.0 // indirect - golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect golang.org/x/text v0.3.7 // indirect - google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect google.golang.org/grpc v1.45.0 // indirect google.golang.org/protobuf v1.28.0 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/exporter/humioexporter/go.sum b/exporter/humioexporter/go.sum index c23dc3b4a972..a5c8e24ac2bd 100644 --- a/exporter/humioexporter/go.sum +++ b/exporter/humioexporter/go.sum @@ -1,7 +1,6 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= @@ -18,16 +17,14 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 
h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= -github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= @@ -39,7 +36,6 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= @@ -93,7 +89,6 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod 
h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -125,8 +120,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= @@ -175,9 +170,6 @@ github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -193,10 +185,12 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= 
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0/go.mod h1:PFmBsWbldL1kiWZk9+0LBZz2brhByaGsvp6pRICMlPE= @@ -207,7 +201,7 @@ go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1 go.opentelemetry.io/otel/metric v0.28.0/go.mod h1:TrzsfQAmQaB1PDcdhBauLMk7nyyg9hm+GoQq/ekE9Iw= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.0/go.mod h1:qs7BrU5cZ8dXQHBGxHMOxwME/27YH2qEp4/+tZLLwJE= go.opentelemetry.io/otel/trace v1.6.1/go.mod h1:RkFRM1m0puWIq10oxImnGEduNBzxiN7TXluRBtE+5j0= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= @@ -246,8 +240,8 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -272,13 +266,11 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -304,7 +296,6 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= @@ -314,7 +305,6 @@ google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8 google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -328,7 +318,6 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= diff --git a/exporter/humioexporter/traces_exporter.go b/exporter/humioexporter/traces_exporter.go index 01edb716c456..391c21c18862 100644 --- a/exporter/humioexporter/traces_exporter.go +++ b/exporter/humioexporter/traces_exporter.go @@ -21,8 +21,9 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer/consumererror" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" ) // HumioLink represents a relation between two spans @@ -85,7 +86,7 @@ func newTracesExporterWithClientGetter(cfg *Config, settings component.Telemetry } } -func (e *humioTracesExporter) pushTraceData(ctx context.Context, td pdata.Traces) error { +func (e *humioTracesExporter) pushTraceData(ctx context.Context, td ptrace.Traces) error { e.wg.Add(1) defer e.wg.Done() @@ -114,9 
+115,9 @@ func (e *humioTracesExporter) pushTraceData(ctx context.Context, td pdata.Traces return conversionErr } -func (e *humioTracesExporter) tracesToHumioEvents(td pdata.Traces) ([]*HumioStructuredEvents, error) { +func (e *humioTracesExporter) tracesToHumioEvents(td ptrace.Traces) ([]*HumioStructuredEvents, error) { organizer := newTagOrganizer(e.cfg.Tag, tagFromSpan) - var droppedTraces []pdata.ResourceSpans + var droppedTraces []ptrace.ResourceSpans resSpans := td.ResourceSpans() for i := 0; i < resSpans.Len(); i++ { @@ -145,7 +146,7 @@ func (e *humioTracesExporter) tracesToHumioEvents(td pdata.Traces) ([]*HumioStru results := organizer.asEvents() if len(droppedTraces) > 0 { - dropped := pdata.NewTraces() + dropped := ptrace.NewTraces() for _, t := range droppedTraces { tgt := dropped.ResourceSpans().AppendEmpty() t.CopyTo(tgt) @@ -160,7 +161,7 @@ func (e *humioTracesExporter) tracesToHumioEvents(td pdata.Traces) ([]*HumioStru return results, nil } -func (e *humioTracesExporter) spanToHumioEvent(span pdata.Span, inst pdata.InstrumentationScope, res pdata.Resource) *HumioStructuredEvent { +func (e *humioTracesExporter) spanToHumioEvent(span ptrace.Span, inst pcommon.InstrumentationScope, res pcommon.Resource) *HumioStructuredEvent { attr := toHumioAttributes(span.Attributes(), res.Attributes()) if instName := inst.Name(); instName != "" { attr[conventions.OtelLibraryName] = instName @@ -196,7 +197,7 @@ func (e *humioTracesExporter) spanToHumioEvent(span pdata.Span, inst pdata.Instr } } -func toHumioLinks(pLinks pdata.SpanLinkSlice) []*HumioLink { +func toHumioLinks(pLinks ptrace.SpanLinkSlice) []*HumioLink { links := make([]*HumioLink, 0, pLinks.Len()) for i := 0; i < pLinks.Len(); i++ { link := pLinks.At(i) @@ -209,10 +210,10 @@ func toHumioLinks(pLinks pdata.SpanLinkSlice) []*HumioLink { return links } -func toHumioAttributes(attrMaps ...pdata.Map) map[string]interface{} { +func toHumioAttributes(attrMaps ...pcommon.Map) map[string]interface{} { attr := make(map[string]interface{}) for _, attrMap := range attrMaps { - attrMap.Range(func(k string, v pdata.Value) bool { + attrMap.Range(func(k string, v pcommon.Value) bool { attr[k] = toHumioAttributeValue(v) return true }) @@ -220,19 +221,19 @@ func toHumioAttributes(attrMaps ...pdata.Map) map[string]interface{} { return attr } -func toHumioAttributeValue(rawVal pdata.Value) interface{} { +func toHumioAttributeValue(rawVal pcommon.Value) interface{} { switch rawVal.Type() { - case pdata.ValueTypeString: + case pcommon.ValueTypeString: return rawVal.StringVal() - case pdata.ValueTypeInt: + case pcommon.ValueTypeInt: return rawVal.IntVal() - case pdata.ValueTypeDouble: + case pcommon.ValueTypeDouble: return rawVal.DoubleVal() - case pdata.ValueTypeBool: + case pcommon.ValueTypeBool: return rawVal.BoolVal() - case pdata.ValueTypeMap: + case pcommon.ValueTypeMap: return toHumioAttributes(rawVal.MapVal()) - case pdata.ValueTypeSlice: + case pcommon.ValueTypeSlice: arrVal := rawVal.SliceVal() arr := make([]interface{}, 0, arrVal.Len()) for i := 0; i < arrVal.Len(); i++ { diff --git a/exporter/humioexporter/traces_exporter_test.go b/exporter/humioexporter/traces_exporter_test.go index ba6a46be3387..7fa727f436cc 100644 --- a/exporter/humioexporter/traces_exporter_test.go +++ b/exporter/humioexporter/traces_exporter_test.go @@ -26,8 +26,9 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/consumer/consumererror" - 
"go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" ) func createSpanID(stringVal string) [8]byte { @@ -110,7 +111,7 @@ func TestPushTraceData(t *testing.T) { t.Errorf("unexpected error when starting component") } - err = exp.pushTraceData(context.Background(), pdata.NewTraces()) + err = exp.pushTraceData(context.Background(), ptrace.NewTraces()) // Assert if (err != nil) != tC.wantErr { @@ -129,7 +130,7 @@ func TestPushTraceData_PermanentOnCompleteFailure(t *testing.T) { // Arrange // We do not export spans with missing service names, so this span should // fail exporting - traces := pdata.NewTraces() + traces := ptrace.NewTraces() traces.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty().Spans().AppendEmpty() cg := func(cfg *Config, settings component.TelemetrySettings, host component.Host) (exporterClient, error) { @@ -153,7 +154,7 @@ func TestPushTraceData_PermanentOnCompleteFailure(t *testing.T) { func TestPushTraceData_TransientOnPartialFailure(t *testing.T) { // Arrange // Prepare a valid span with a service name... - traces := pdata.NewTraces() + traces := ptrace.NewTraces() traces.ResourceSpans().EnsureCapacity(2) rspan := traces.ResourceSpans().AppendEmpty() rspan.Resource().Attributes().InsertString(conventions.AttributeServiceName, "service1") @@ -189,23 +190,23 @@ func TestPushTraceData_TransientOnPartialFailure(t *testing.T) { func TestTracesToHumioEvents_OrganizedByTags(t *testing.T) { // Arrange - traces := pdata.NewTraces() + traces := ptrace.NewTraces() // Three spans for the same trace across two different resources, as // well a span from a separate trace res1 := traces.ResourceSpans().AppendEmpty() res1.Resource().Attributes().InsertString(conventions.AttributeServiceName, "service-A") ils1 := res1.ScopeSpans().AppendEmpty() - ils1.Spans().AppendEmpty().SetTraceID(pdata.NewTraceID(createTraceID("10000000000000000000000000000000"))) - ils1.Spans().AppendEmpty().SetTraceID(pdata.NewTraceID(createTraceID("10000000000000000000000000000000"))) + ils1.Spans().AppendEmpty().SetTraceID(pcommon.NewTraceID(createTraceID("10000000000000000000000000000000"))) + ils1.Spans().AppendEmpty().SetTraceID(pcommon.NewTraceID(createTraceID("10000000000000000000000000000000"))) res2 := traces.ResourceSpans().AppendEmpty() res2.Resource().Attributes().InsertString(conventions.AttributeServiceName, "service-B") - res2.ScopeSpans().AppendEmpty().Spans().AppendEmpty().SetTraceID(pdata.NewTraceID(createTraceID("10000000000000000000000000000000"))) + res2.ScopeSpans().AppendEmpty().Spans().AppendEmpty().SetTraceID(pcommon.NewTraceID(createTraceID("10000000000000000000000000000000"))) res3 := traces.ResourceSpans().AppendEmpty() res3.Resource().Attributes().InsertString(conventions.AttributeServiceName, "service-C") - res3.ScopeSpans().AppendEmpty().Spans().AppendEmpty().SetTraceID(pdata.NewTraceID(createTraceID("20000000000000000000000000000000"))) + res3.ScopeSpans().AppendEmpty().Spans().AppendEmpty().SetTraceID(pcommon.NewTraceID(createTraceID("20000000000000000000000000000000"))) // Organize by trace id cg := func(cfg *Config, settings component.TelemetrySettings, host component.Host) (exporterClient, error) { @@ -238,26 +239,26 @@ func TestTracesToHumioEvents_OrganizedByTags(t *testing.T) { func TestSpanToHumioEvent(t *testing.T) { // Arrange - span := pdata.NewSpan() - span.SetTraceID(pdata.NewTraceID(createTraceID("10"))) - 
span.SetSpanID(pdata.NewSpanID(createSpanID("20"))) + span := ptrace.NewSpan() + span.SetTraceID(pcommon.NewTraceID(createTraceID("10"))) + span.SetSpanID(pcommon.NewSpanID(createSpanID("20"))) span.SetName("span") - span.SetKind(pdata.SpanKindServer) - span.SetStartTimestamp(pdata.NewTimestampFromTime( + span.SetKind(ptrace.SpanKindServer) + span.SetStartTimestamp(pcommon.NewTimestampFromTime( time.Date(2020, 1, 1, 12, 0, 0, 0, time.UTC), )) - span.SetEndTimestamp(pdata.NewTimestampFromTime( + span.SetEndTimestamp(pcommon.NewTimestampFromTime( time.Date(2020, 1, 1, 12, 0, 16, 0, time.UTC), )) - span.Status().SetCode(pdata.StatusCodeOk) + span.Status().SetCode(ptrace.StatusCodeOk) span.Status().SetMessage("done") span.Attributes().InsertString("key", "val") - inst := pdata.NewInstrumentationScope() + inst := pcommon.NewInstrumentationScope() inst.SetName("otel-test") inst.SetVersion("1.0.0") - res := pdata.NewResource() + res := pcommon.NewResource() res.Attributes().InsertString("service.name", "myapp") expected := &HumioStructuredEvent{ @@ -305,9 +306,9 @@ func TestSpanToHumioEvent(t *testing.T) { func TestSpanToHumioEventNoInstrumentation(t *testing.T) { // Arrange - span := pdata.NewSpan() - inst := pdata.NewInstrumentationScope() - res := pdata.NewResource() + span := ptrace.NewSpan() + inst := pcommon.NewInstrumentationScope() + res := pcommon.NewResource() cg := func(cfg *Config, settings component.TelemetrySettings, host component.Host) (exporterClient, error) { return &clientMock{}, nil @@ -332,15 +333,15 @@ func TestSpanToHumioEventNoInstrumentation(t *testing.T) { func TestToHumioLinks(t *testing.T) { // Arrange - slice := pdata.NewSpanLinkSlice() + slice := ptrace.NewSpanLinkSlice() link1 := slice.AppendEmpty() - link1.SetTraceID(pdata.NewTraceID(createTraceID("11"))) - link1.SetSpanID(pdata.NewSpanID(createSpanID("22"))) + link1.SetTraceID(pcommon.NewTraceID(createTraceID("11"))) + link1.SetSpanID(pcommon.NewSpanID(createSpanID("22"))) link1.SetTraceState("state1") link2 := slice.AppendEmpty() - link2.SetTraceID(pdata.NewTraceID(createTraceID("33"))) - link2.SetSpanID(pdata.NewSpanID(createSpanID("44"))) + link2.SetTraceID(pcommon.NewTraceID(createTraceID("33"))) + link2.SetSpanID(pcommon.NewSpanID(createSpanID("44"))) expected := []*HumioLink{ { @@ -366,13 +367,13 @@ func TestToHumioAttributes(t *testing.T) { // Arrange testCases := []struct { desc string - attr func() pdata.Map + attr func() pcommon.Map expected interface{} }{ { desc: "Simple types", - attr: func() pdata.Map { - attrMap := pdata.NewMap() + attr: func() pcommon.Map { + attrMap := pcommon.NewMap() attrMap.InsertString("string", "val") attrMap.InsertInt("integer", 42) attrMap.InsertDouble("double", 4.2) @@ -388,8 +389,8 @@ func TestToHumioAttributes(t *testing.T) { }, { desc: "Nil element", - attr: func() pdata.Map { - attrMap := pdata.NewMap() + attr: func() pcommon.Map { + attrMap := pcommon.NewMap() attrMap.InsertNull("key") return attrMap }, @@ -399,9 +400,9 @@ func TestToHumioAttributes(t *testing.T) { }, { desc: "Array element", - attr: func() pdata.Map { - attrMap := pdata.NewMap() - arr := pdata.NewValueSlice() + attr: func() pcommon.Map { + attrMap := pcommon.NewMap() + arr := pcommon.NewValueSlice() arr.SliceVal().AppendEmpty().SetStringVal("a") arr.SliceVal().AppendEmpty().SetStringVal("b") arr.SliceVal().AppendEmpty().SetIntVal(4) @@ -416,9 +417,9 @@ func TestToHumioAttributes(t *testing.T) { }, { desc: "Nested map", - attr: func() pdata.Map { - attrMap := pdata.NewMap() - nested := 
pdata.NewValueMap() + attr: func() pcommon.Map { + attrMap := pcommon.NewMap() + nested := pcommon.NewValueMap() nested.MapVal().InsertString("key", "val") attrMap.Insert("nested", nested) attrMap.InsertBool("active", true) @@ -445,11 +446,11 @@ func TestToHumioAttributes(t *testing.T) { func TestToHumioAttributesShaded(t *testing.T) { // Arrange - attrMapA := pdata.NewMap() + attrMapA := pcommon.NewMap() attrMapA.InsertString("string", "val") attrMapA.InsertInt("integer", 42) - attrMapB := pdata.NewMap() + attrMapB := pcommon.NewMap() attrMapB.InsertInt("integer", 0) attrMapB.InsertString("key", "val") diff --git a/exporter/influxdbexporter/exporter.go b/exporter/influxdbexporter/exporter.go index abbdd5bebd65..d8f96a5d3ebb 100644 --- a/exporter/influxdbexporter/exporter.go +++ b/exporter/influxdbexporter/exporter.go @@ -22,7 +22,9 @@ import ( "github.com/influxdata/influxdb-observability/otel2influx" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer/consumererror" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" ) type tracesExporter struct { @@ -45,7 +47,7 @@ func newTracesExporter(config *Config, params component.ExporterCreateSettings) } } -func (e *tracesExporter) pushTraces(ctx context.Context, td pdata.Traces) error { +func (e *tracesExporter) pushTraces(ctx context.Context, td ptrace.Traces) error { batch := e.writer.newBatch() err := e.converter.WriteTraces(ctx, td, batch) @@ -100,7 +102,7 @@ func newMetricsExporter(config *Config, params component.ExporterCreateSettings) }, nil } -func (e *metricsExporter) pushMetrics(ctx context.Context, md pdata.Metrics) error { +func (e *metricsExporter) pushMetrics(ctx context.Context, md pmetric.Metrics) error { batch := e.writer.newBatch() err := e.converter.WriteMetrics(ctx, md, batch) @@ -142,7 +144,7 @@ func newLogsExporter(config *Config, params component.ExporterCreateSettings) *l } } -func (e *logsExporter) pushLogs(ctx context.Context, ld pdata.Logs) error { +func (e *logsExporter) pushLogs(ctx context.Context, ld plog.Logs) error { batch := e.writer.newBatch() err := e.converter.WriteLogs(ctx, ld, batch) diff --git a/exporter/influxdbexporter/go.mod b/exporter/influxdbexporter/go.mod index 0b14ebc251a2..053283a8c281 100644 --- a/exporter/influxdbexporter/go.mod +++ b/exporter/influxdbexporter/go.mod @@ -7,14 +7,14 @@ require ( github.com/influxdata/influxdb-observability/otel2influx v0.2.17 github.com/influxdata/line-protocol/v2 v2.2.1 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/zap v1.21.0 ) require ( - github.com/cenkalti/backoff/v4 v4.1.2 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/felixge/httpsnoop v1.0.2 // indirect github.com/frankban/quicktest v1.14.0 // indirect @@ -25,7 +25,7 @@ require ( github.com/golang/protobuf v1.5.2 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/klauspost/compress v1.15.1 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect 
github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect @@ -33,20 +33,20 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rs/cors v1.8.2 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opencensus.io v0.23.0 // indirect + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect - golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect golang.org/x/text v0.3.7 // indirect - google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect google.golang.org/grpc v1.45.0 // indirect google.golang.org/protobuf v1.28.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/exporter/influxdbexporter/go.sum b/exporter/influxdbexporter/go.sum index a1abcb8a9e07..e9c312c87bbf 100644 --- a/exporter/influxdbexporter/go.sum +++ b/exporter/influxdbexporter/go.sum @@ -1,7 +1,6 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= @@ -18,16 +17,14 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= -github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod 
h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= @@ -39,7 +36,6 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= @@ -99,7 +95,6 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -142,8 +137,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty 
v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= @@ -193,9 +188,6 @@ github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -211,10 +203,13 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0/go.mod h1:PFmBsWbldL1kiWZk9+0LBZz2brhByaGsvp6pRICMlPE= @@ -225,7 +220,7 @@ go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1 go.opentelemetry.io/otel/metric v0.28.0/go.mod h1:TrzsfQAmQaB1PDcdhBauLMk7nyyg9hm+GoQq/ekE9Iw= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= 
go.opentelemetry.io/otel/trace v1.6.0/go.mod h1:qs7BrU5cZ8dXQHBGxHMOxwME/27YH2qEp4/+tZLLwJE= go.opentelemetry.io/otel/trace v1.6.1/go.mod h1:RkFRM1m0puWIq10oxImnGEduNBzxiN7TXluRBtE+5j0= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= @@ -264,8 +259,8 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -290,13 +285,11 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -322,7 +315,6 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= 
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= @@ -332,7 +324,6 @@ google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8 google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -346,7 +337,6 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= diff --git a/exporter/jaegerexporter/exporter.go b/exporter/jaegerexporter/exporter.go index ec4a3b004193..f38083a304d2 100644 --- a/exporter/jaegerexporter/exporter.go +++ b/exporter/jaegerexporter/exporter.go @@ -28,7 +28,7 @@ import ( "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumererror" "go.opentelemetry.io/collector/exporter/exporterhelper" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/connectivity" @@ -92,7 +92,7 @@ type stateReporter interface { func (s *protoGRPCSender) pushTraces( ctx context.Context, - td pdata.Traces, + td ptrace.Traces, ) error { batches, err := jaeger.ProtoFromTraces(td) diff --git a/exporter/jaegerexporter/exporter_test.go b/exporter/jaegerexporter/exporter_test.go index f33b21888fc5..1409c3bbbe69 100644 --- a/exporter/jaegerexporter/exporter_test.go +++ b/exporter/jaegerexporter/exporter_test.go @@ -30,7 +30,8 @@ import ( "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/config/configgrpc" "go.opentelemetry.io/collector/config/configtls" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "google.golang.org/grpc" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" @@ -227,10 +228,10 @@ func TestMutualTLS(t *testing.T) { require.NoError(t, err) t.Cleanup(func() { require.NoError(t, exporter.Shutdown(context.Background())) }) - traceID := pdata.NewTraceID([16]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}) - spanID := pdata.NewSpanID([8]byte{0, 1, 2, 3, 4, 5, 6, 7}) + traceID := pcommon.NewTraceID([16]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}) + spanID := pcommon.NewSpanID([8]byte{0, 1, 2, 3, 4, 5, 6, 7}) - td := pdata.NewTraces() 
+ td := ptrace.NewTraces() span := td.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty().Spans().AppendEmpty() span.SetTraceID(traceID) span.SetSpanID(spanID) diff --git a/exporter/jaegerexporter/go.mod b/exporter/jaegerexporter/go.mod index 13eb1041fd3f..fa1785bc37c5 100644 --- a/exporter/jaegerexporter/go.mod +++ b/exporter/jaegerexporter/go.mod @@ -8,15 +8,15 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.48.0 github.com/stretchr/testify v1.7.1 go.opencensus.io v0.23.0 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/zap v1.21.0 google.golang.org/grpc v1.45.0 ) require ( github.com/apache/thrift v0.16.0 // indirect - github.com/cenkalti/backoff/v4 v4.1.2 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/go-logr/logr v1.2.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect @@ -25,24 +25,24 @@ require ( github.com/golang/protobuf v1.5.2 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/klauspost/compress v1.15.1 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/mostynb/go-grpc-compression v1.1.16 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spf13/cast v1.4.1 // indirect github.com/uber/jaeger-client-go v2.30.0+incompatible // indirect github.com/uber/jaeger-lib v2.4.1+incompatible // indirect + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect - golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect + golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 // indirect golang.org/x/text v0.3.7 // indirect google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect google.golang.org/protobuf v1.28.0 // indirect @@ -53,3 +53,5 @@ require ( replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal => ../../internal/coreinternal replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger => ../../pkg/translator/jaeger + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/exporter/jaegerexporter/go.sum b/exporter/jaegerexporter/go.sum index 3a63698e016c..1b7a62ef6837 100644 --- a/exporter/jaegerexporter/go.sum +++ b/exporter/jaegerexporter/go.sum @@ -23,8 +23,8 @@ github.com/aws/smithy-go v1.8.0/go.mod 
h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= -github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -102,7 +102,6 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -138,8 +137,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/klauspost/compress v1.14.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= @@ -195,8 +194,6 @@ github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFo github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/schollz/progressbar/v2 v2.13.2/go.mod h1:6YZjqdthH6SCZKv2rqGryrxPtfmRB/DWZxSMfCXPyD8= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 
h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -217,10 +214,12 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0/go.mod h1:SY9qHHUES6W3oZnO1H2W8NvsSovIoXRg/A1AH9px8+I= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= @@ -229,7 +228,7 @@ go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.1/go.mod h1:RkFRM1m0puWIq10oxImnGEduNBzxiN7TXluRBtE+5j0= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= @@ -268,8 +267,8 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= 
+golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -295,8 +294,8 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= diff --git a/exporter/jaegerthrifthttpexporter/exporter.go b/exporter/jaegerthrifthttpexporter/exporter.go index 444297110b9e..bd56c785e53b 100644 --- a/exporter/jaegerthrifthttpexporter/exporter.go +++ b/exporter/jaegerthrifthttpexporter/exporter.go @@ -29,7 +29,7 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer/consumererror" "go.opentelemetry.io/collector/exporter/exporterhelper" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/ptrace" jaegertranslator "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger" ) @@ -72,7 +72,7 @@ func (s *jaegerThriftHTTPSender) start(_ context.Context, host component.Host) ( func (s *jaegerThriftHTTPSender) pushTraceData( ctx context.Context, - td pdata.Traces, + td ptrace.Traces, ) error { batches, err := jaegertranslator.ProtoFromTraces(td) if err != nil { diff --git a/exporter/jaegerthrifthttpexporter/exporter_test.go b/exporter/jaegerthrifthttpexporter/exporter_test.go index a54f85c7403a..654ac7088bee 100644 --- a/exporter/jaegerthrifthttpexporter/exporter_test.go +++ b/exporter/jaegerthrifthttpexporter/exporter_test.go @@ -24,7 +24,7 @@ import ( "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/config/confighttp" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/ptrace" ) const testHTTPAddress = "http://a.example.com:123/at/some/path" @@ -43,6 +43,6 @@ func TestNew(t *testing.T) { assert.NoError(t, err) require.NotNil(t, got) - err = got.ConsumeTraces(context.Background(), pdata.NewTraces()) + err = got.ConsumeTraces(context.Background(), ptrace.NewTraces()) assert.NoError(t, err) } diff --git a/exporter/jaegerthrifthttpexporter/go.mod b/exporter/jaegerthrifthttpexporter/go.mod index 
934e6b776e2a..10431c058932 100644 --- a/exporter/jaegerthrifthttpexporter/go.mod +++ b/exporter/jaegerthrifthttpexporter/go.mod @@ -7,12 +7,12 @@ require ( github.com/jaegertracing/jaeger v1.32.0 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.48.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 ) require ( - github.com/cenkalti/backoff/v4 v4.1.2 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/felixge/httpsnoop v1.0.2 // indirect github.com/go-logr/logr v1.2.3 // indirect @@ -22,7 +22,7 @@ require ( github.com/golang/protobuf v1.5.2 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/klauspost/compress v1.15.1 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect @@ -30,10 +30,10 @@ require ( github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rs/cors v1.8.2 // indirect - github.com/spf13/cast v1.4.1 // indirect github.com/uber/jaeger-client-go v2.30.0+incompatible // indirect github.com/uber/jaeger-lib v2.4.1+incompatible // indirect go.opencensus.io v0.23.0 // indirect + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect @@ -41,8 +41,8 @@ require ( go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect go.uber.org/zap v1.21.0 // indirect - golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect + golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 // indirect golang.org/x/text v0.3.7 // indirect google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect google.golang.org/grpc v1.45.0 // indirect @@ -56,3 +56,5 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger => ../../pkg/translator/jaeger replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal => ../../internal/coreinternal + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/exporter/jaegerthrifthttpexporter/go.sum b/exporter/jaegerthrifthttpexporter/go.sum index 0c0c67fc60ad..ee4975ed4600 100644 --- a/exporter/jaegerthrifthttpexporter/go.sum +++ b/exporter/jaegerthrifthttpexporter/go.sum @@ -21,8 +21,8 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock 
v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= -github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -97,7 +97,6 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -131,8 +130,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -178,8 +177,6 @@ github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod 
h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -200,10 +197,12 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0/go.mod h1:PFmBsWbldL1kiWZk9+0LBZz2brhByaGsvp6pRICMlPE= @@ -214,7 +213,7 @@ go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1 go.opentelemetry.io/otel/metric v0.28.0/go.mod h1:TrzsfQAmQaB1PDcdhBauLMk7nyyg9hm+GoQq/ekE9Iw= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.0/go.mod h1:qs7BrU5cZ8dXQHBGxHMOxwME/27YH2qEp4/+tZLLwJE= go.opentelemetry.io/otel/trace v1.6.1/go.mod h1:RkFRM1m0puWIq10oxImnGEduNBzxiN7TXluRBtE+5j0= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= @@ -253,8 +252,8 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod 
h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -278,8 +277,8 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= diff --git a/exporter/kafkaexporter/factory_test.go b/exporter/kafkaexporter/factory_test.go index 0f380432b5cb..5d1a0d19a5f3 100644 --- a/exporter/kafkaexporter/factory_test.go +++ b/exporter/kafkaexporter/factory_test.go @@ -23,7 +23,7 @@ import ( "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/config/configtest" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/ptrace" ) func TestCreateDefaultConfig(t *testing.T) { @@ -127,7 +127,7 @@ type customMarshaler struct { var _ TracesMarshaler = (*customMarshaler)(nil) -func (c customMarshaler) Marshal(_ pdata.Traces, topic string) ([]*sarama.ProducerMessage, error) { +func (c customMarshaler) Marshal(_ ptrace.Traces, topic string) ([]*sarama.ProducerMessage, error) { panic("implement me") } diff --git a/exporter/kafkaexporter/go.mod b/exporter/kafkaexporter/go.mod index 3a2ac13cd0b0..473a5e6cac7c 100644 --- a/exporter/kafkaexporter/go.mod +++ b/exporter/kafkaexporter/go.mod @@ -11,20 +11,20 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.48.0 github.com/stretchr/testify v1.7.1 github.com/xdg-go/scram v1.1.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/multierr v1.8.0 go.uber.org/zap v1.21.0 ) require ( github.com/apache/thrift v0.16.0 // indirect - github.com/cenkalti/backoff/v4 v4.1.2 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/eapache/go-resiliency v1.2.0 // indirect github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 // indirect 
github.com/eapache/queue v1.1.0 // indirect - github.com/golang/protobuf v1.5.2 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/hashicorp/go-uuid v1.0.2 // indirect github.com/jcmturner/aescts/v2 v2.0.0 // indirect @@ -34,7 +34,7 @@ require ( github.com/jcmturner/rpc/v2 v2.0.3 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/klauspost/compress v1.15.1 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect @@ -42,7 +42,6 @@ require ( github.com/pierrec/lz4 v2.6.1+incompatible // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect - github.com/spf13/cast v1.4.1 // indirect github.com/uber/jaeger-client-go v2.30.0+incompatible // indirect github.com/uber/jaeger-lib v2.4.1+incompatible // indirect github.com/xdg-go/pbkdf2 v1.0.0 // indirect @@ -53,12 +52,8 @@ require ( go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect golang.org/x/crypto v0.0.0-20220214200702-86341886e292 // indirect - golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect golang.org/x/text v0.3.7 // indirect - google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect - google.golang.org/grpc v1.45.0 // indirect - google.golang.org/protobuf v1.28.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) @@ -66,3 +61,5 @@ require ( replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal => ../../internal/coreinternal replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger => ../../pkg/translator/jaeger + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/exporter/kafkaexporter/go.sum b/exporter/kafkaexporter/go.sum index ba1fe8667dbb..ddc363118773 100644 --- a/exporter/kafkaexporter/go.sum +++ b/exporter/kafkaexporter/go.sum @@ -1,13 +1,10 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/HdrHistogram/hdrhistogram-go v1.0.1 h1:GX8GAYDuhlFQnI2fRDHQhTlkHMz8bEn0jTI6LJU0mpw= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/Shopify/sarama v1.32.0 h1:P+RUjEaRU0GMMbYexGMDyrMkLhbbBVUVISDywi+IlFU= github.com/Shopify/sarama v1.32.0/go.mod h1:+EmJJKZWVT/faR9RcOxJerP+LId4iWdQPBGLy1Y1Njs= github.com/Shopify/toxiproxy/v2 v2.3.0 h1:62YkpiP4bzdhKMH+6uC5E95y608k3zDwdzuBMsnn3uQ= github.com/Shopify/toxiproxy/v2 v2.3.0/go.mod h1:KvQTtB6RjCJY4zqNJn7C7JDFgsG5uoHYDirfUfpIm0c= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apache/thrift v0.16.0 
h1:qEy6UW60iVOlUy+b9ZR0d5WzUWYGOo4HfopoyBaNmoY= github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= @@ -27,19 +24,11 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= -github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -54,9 +43,6 @@ github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFP github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= 
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= @@ -67,7 +53,6 @@ github.com/frankban/quicktest v1.14.2 h1:SPb1KFFmM+ybpEjPUhCCkZOM5xlovT5UbrMvWnX github.com/frankban/quicktest v1.14.2/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= @@ -85,18 +70,14 @@ github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3K github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -107,16 +88,13 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -166,8 +144,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/klauspost/compress v1.14.4/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= @@ -214,7 +192,6 @@ github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1: github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rhnvrm/simples3 v0.6.1/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.6.2 h1:aIihoIOHCiLZHxyoNQ+ABL4NKhFTgKLBdMLyEAh98m0= github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= @@ -223,9 +200,6 @@ github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFo github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= 
-github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -233,7 +207,6 @@ github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= @@ -256,20 +229,21 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.3 
h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -296,22 +270,20 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -328,7 +300,6 @@ golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -337,8 +308,7 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -368,22 +338,16 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf 
v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -393,11 +357,7 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/exporter/kafkaexporter/jaeger_marshaler.go b/exporter/kafkaexporter/jaeger_marshaler.go index 3260a1b58e67..3ca66281ad95 100644 --- a/exporter/kafkaexporter/jaeger_marshaler.go +++ b/exporter/kafkaexporter/jaeger_marshaler.go @@ -20,7 +20,7 @@ import ( "github.com/Shopify/sarama" "github.com/gogo/protobuf/jsonpb" jaegerproto "github.com/jaegertracing/jaeger/model" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/multierr" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger" @@ -32,7 +32,7 @@ type jaegerMarshaler struct { var _ TracesMarshaler = (*jaegerMarshaler)(nil) -func (j jaegerMarshaler) Marshal(traces pdata.Traces, topic string) ([]*sarama.ProducerMessage, error) { +func (j jaegerMarshaler) Marshal(traces ptrace.Traces, topic string) ([]*sarama.ProducerMessage, error) { batches, err := jaeger.ProtoFromTraces(traces) if err != nil { return nil, err diff --git a/exporter/kafkaexporter/jaeger_marshaler_test.go b/exporter/kafkaexporter/jaeger_marshaler_test.go index d04530dc2ea3..5e3c5e78bf66 100644 --- a/exporter/kafkaexporter/jaeger_marshaler_test.go +++ b/exporter/kafkaexporter/jaeger_marshaler_test.go @@ -22,19 +22,20 @@ import ( "github.com/gogo/protobuf/jsonpb" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger" ) func TestJaegerMarshaler(t *testing.T) { - td := pdata.NewTraces() + td := ptrace.NewTraces() span := td.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty().Spans().AppendEmpty() span.SetName("foo") - span.SetStartTimestamp(pdata.Timestamp(10)) - span.SetEndTimestamp(pdata.Timestamp(20)) - span.SetTraceID(pdata.NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16})) - span.SetSpanID(pdata.NewSpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8})) + span.SetStartTimestamp(pcommon.Timestamp(10)) + 
span.SetEndTimestamp(pcommon.Timestamp(20)) + span.SetTraceID(pcommon.NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16})) + span.SetSpanID(pcommon.NewSpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8})) batches, err := jaeger.ProtoFromTraces(td) require.NoError(t, err) diff --git a/exporter/kafkaexporter/kafka_exporter.go b/exporter/kafkaexporter/kafka_exporter.go index 7211c1723887..5585c3a0cfe9 100644 --- a/exporter/kafkaexporter/kafka_exporter.go +++ b/exporter/kafkaexporter/kafka_exporter.go @@ -21,7 +21,9 @@ import ( "github.com/Shopify/sarama" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer/consumererror" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap" ) @@ -44,7 +46,7 @@ func (ke kafkaErrors) Error() string { return fmt.Sprintf("Failed to deliver %d messages due to %s", ke.count, ke.err) } -func (e *kafkaTracesProducer) tracesPusher(_ context.Context, td pdata.Traces) error { +func (e *kafkaTracesProducer) tracesPusher(_ context.Context, td ptrace.Traces) error { messages, err := e.marshaler.Marshal(td, e.topic) if err != nil { return consumererror.NewPermanent(err) @@ -73,7 +75,7 @@ type kafkaMetricsProducer struct { logger *zap.Logger } -func (e *kafkaMetricsProducer) metricsDataPusher(_ context.Context, md pdata.Metrics) error { +func (e *kafkaMetricsProducer) metricsDataPusher(_ context.Context, md pmetric.Metrics) error { messages, err := e.marshaler.Marshal(md, e.topic) if err != nil { return consumererror.NewPermanent(err) @@ -102,7 +104,7 @@ type kafkaLogsProducer struct { logger *zap.Logger } -func (e *kafkaLogsProducer) logsDataPusher(_ context.Context, ld pdata.Logs) error { +func (e *kafkaLogsProducer) logsDataPusher(_ context.Context, ld plog.Logs) error { messages, err := e.marshaler.Marshal(ld, e.topic) if err != nil { return consumererror.NewPermanent(err) diff --git a/exporter/kafkaexporter/kafka_exporter_test.go b/exporter/kafkaexporter/kafka_exporter_test.go index 279ea72a2039..01d514ca052a 100644 --- a/exporter/kafkaexporter/kafka_exporter_test.go +++ b/exporter/kafkaexporter/kafka_exporter_test.go @@ -25,8 +25,9 @@ import ( "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/config/configtls" - "go.opentelemetry.io/collector/model/otlp" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/testdata" @@ -141,7 +142,7 @@ func TestTracesPusher(t *testing.T) { p := kafkaTracesProducer{ producer: producer, - marshaler: newPdataTracesMarshaler(otlp.NewProtobufTracesMarshaler(), defaultEncoding), + marshaler: newPdataTracesMarshaler(ptrace.NewProtoMarshaler(), defaultEncoding), } t.Cleanup(func() { require.NoError(t, p.Close(context.Background())) @@ -158,7 +159,7 @@ func TestTracesPusher_err(t *testing.T) { p := kafkaTracesProducer{ producer: producer, - marshaler: newPdataTracesMarshaler(otlp.NewProtobufTracesMarshaler(), defaultEncoding), + marshaler: newPdataTracesMarshaler(ptrace.NewProtoMarshaler(), defaultEncoding), logger: zap.NewNop(), } t.Cleanup(func() { @@ -188,7 +189,7 @@ func TestMetricsDataPusher(t *testing.T) { p := 
kafkaMetricsProducer{ producer: producer, - marshaler: newPdataMetricsMarshaler(otlp.NewProtobufMetricsMarshaler(), defaultEncoding), + marshaler: newPdataMetricsMarshaler(pmetric.NewProtoMarshaler(), defaultEncoding), } t.Cleanup(func() { require.NoError(t, p.Close(context.Background())) @@ -205,7 +206,7 @@ func TestMetricsDataPusher_err(t *testing.T) { p := kafkaMetricsProducer{ producer: producer, - marshaler: newPdataMetricsMarshaler(otlp.NewProtobufMetricsMarshaler(), defaultEncoding), + marshaler: newPdataMetricsMarshaler(pmetric.NewProtoMarshaler(), defaultEncoding), logger: zap.NewNop(), } t.Cleanup(func() { @@ -235,7 +236,7 @@ func TestLogsDataPusher(t *testing.T) { p := kafkaLogsProducer{ producer: producer, - marshaler: newPdataLogsMarshaler(otlp.NewProtobufLogsMarshaler(), defaultEncoding), + marshaler: newPdataLogsMarshaler(plog.NewProtoMarshaler(), defaultEncoding), } t.Cleanup(func() { require.NoError(t, p.Close(context.Background())) @@ -252,7 +253,7 @@ func TestLogsDataPusher_err(t *testing.T) { p := kafkaLogsProducer{ producer: producer, - marshaler: newPdataLogsMarshaler(otlp.NewProtobufLogsMarshaler(), defaultEncoding), + marshaler: newPdataLogsMarshaler(plog.NewProtoMarshaler(), defaultEncoding), logger: zap.NewNop(), } t.Cleanup(func() { @@ -287,7 +288,7 @@ type logsErrorMarshaler struct { err error } -func (e metricsErrorMarshaler) Marshal(_ pdata.Metrics, _ string) ([]*sarama.ProducerMessage, error) { +func (e metricsErrorMarshaler) Marshal(_ pmetric.Metrics, _ string) ([]*sarama.ProducerMessage, error) { return nil, e.err } @@ -297,7 +298,7 @@ func (e metricsErrorMarshaler) Encoding() string { var _ TracesMarshaler = (*tracesErrorMarshaler)(nil) -func (e tracesErrorMarshaler) Marshal(_ pdata.Traces, _ string) ([]*sarama.ProducerMessage, error) { +func (e tracesErrorMarshaler) Marshal(_ ptrace.Traces, _ string) ([]*sarama.ProducerMessage, error) { return nil, e.err } @@ -305,7 +306,7 @@ func (e tracesErrorMarshaler) Encoding() string { panic("implement me") } -func (e logsErrorMarshaler) Marshal(_ pdata.Logs, _ string) ([]*sarama.ProducerMessage, error) { +func (e logsErrorMarshaler) Marshal(_ plog.Logs, _ string) ([]*sarama.ProducerMessage, error) { return nil, e.err } diff --git a/exporter/kafkaexporter/marshaler.go b/exporter/kafkaexporter/marshaler.go index e013da1ae84b..bdc34e4a44f9 100644 --- a/exporter/kafkaexporter/marshaler.go +++ b/exporter/kafkaexporter/marshaler.go @@ -16,14 +16,15 @@ package kafkaexporter // import "github.com/open-telemetry/opentelemetry-collect import ( "github.com/Shopify/sarama" - "go.opentelemetry.io/collector/model/otlp" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" ) // TracesMarshaler marshals traces into Message array. 
type TracesMarshaler interface { // Marshal serializes spans into sarama's ProducerMessages - Marshal(traces pdata.Traces, topic string) ([]*sarama.ProducerMessage, error) + Marshal(traces ptrace.Traces, topic string) ([]*sarama.ProducerMessage, error) // Encoding returns encoding name Encoding() string @@ -32,7 +33,7 @@ type TracesMarshaler interface { // MetricsMarshaler marshals metrics into Message array type MetricsMarshaler interface { // Marshal serializes metrics into sarama's ProducerMessages - Marshal(metrics pdata.Metrics, topic string) ([]*sarama.ProducerMessage, error) + Marshal(metrics pmetric.Metrics, topic string) ([]*sarama.ProducerMessage, error) // Encoding returns encoding name Encoding() string @@ -41,7 +42,7 @@ type MetricsMarshaler interface { // LogsMarshaler marshals logs into Message array type LogsMarshaler interface { // Marshal serializes logs into sarama's ProducerMessages - Marshal(logs pdata.Logs, topic string) ([]*sarama.ProducerMessage, error) + Marshal(logs plog.Logs, topic string) ([]*sarama.ProducerMessage, error) // Encoding returns encoding name Encoding() string @@ -49,8 +50,8 @@ type LogsMarshaler interface { // tracesMarshalers returns map of supported encodings with TracesMarshaler. func tracesMarshalers() map[string]TracesMarshaler { - otlpPb := newPdataTracesMarshaler(otlp.NewProtobufTracesMarshaler(), defaultEncoding) - otlpJSON := newPdataTracesMarshaler(otlp.NewJSONTracesMarshaler(), "otlp_json") + otlpPb := newPdataTracesMarshaler(ptrace.NewProtoMarshaler(), defaultEncoding) + otlpJSON := newPdataTracesMarshaler(ptrace.NewJSONMarshaler(), "otlp_json") jaegerProto := jaegerMarshaler{marshaler: jaegerProtoSpanMarshaler{}} jaegerJSON := jaegerMarshaler{marshaler: newJaegerJSONMarshaler()} return map[string]TracesMarshaler{ @@ -63,8 +64,8 @@ func tracesMarshalers() map[string]TracesMarshaler { // metricsMarshalers returns map of supported encodings and MetricsMarshaler func metricsMarshalers() map[string]MetricsMarshaler { - otlpPb := newPdataMetricsMarshaler(otlp.NewProtobufMetricsMarshaler(), defaultEncoding) - otlpJSON := newPdataMetricsMarshaler(otlp.NewJSONMetricsMarshaler(), "otlp_json") + otlpPb := newPdataMetricsMarshaler(pmetric.NewProtoMarshaler(), defaultEncoding) + otlpJSON := newPdataMetricsMarshaler(pmetric.NewJSONMarshaler(), "otlp_json") return map[string]MetricsMarshaler{ otlpPb.Encoding(): otlpPb, otlpJSON.Encoding(): otlpJSON, @@ -73,8 +74,8 @@ func metricsMarshalers() map[string]MetricsMarshaler { // logsMarshalers returns map of supported encodings and LogsMarshaler func logsMarshalers() map[string]LogsMarshaler { - otlpPb := newPdataLogsMarshaler(otlp.NewProtobufLogsMarshaler(), defaultEncoding) - otlpJSON := newPdataLogsMarshaler(otlp.NewJSONLogsMarshaler(), "otlp_json") + otlpPb := newPdataLogsMarshaler(plog.NewProtoMarshaler(), defaultEncoding) + otlpJSON := newPdataLogsMarshaler(plog.NewJSONMarshaler(), "otlp_json") return map[string]LogsMarshaler{ otlpPb.Encoding(): otlpPb, otlpJSON.Encoding(): otlpJSON, diff --git a/exporter/kafkaexporter/marshaler_test.go b/exporter/kafkaexporter/marshaler_test.go index 6e4e88457986..87b7b5bcfbbc 100644 --- a/exporter/kafkaexporter/marshaler_test.go +++ b/exporter/kafkaexporter/marshaler_test.go @@ -21,8 +21,9 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" semconv "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + 
"go.opentelemetry.io/collector/pdata/ptrace" ) func TestDefaultTracesMarshalers(t *testing.T) { @@ -80,7 +81,7 @@ func TestOTLPTracesJsonMarshaling(t *testing.T) { now := time.Unix(1, 0) - traces := pdata.NewTraces() + traces := ptrace.NewTraces() traces.ResourceSpans().AppendEmpty() rs := traces.ResourceSpans().At(0) @@ -92,12 +93,12 @@ func TestOTLPTracesJsonMarshaling(t *testing.T) { ils.Spans().AppendEmpty() span := ils.Spans().At(0) - span.SetKind(pdata.SpanKindInternal) + span.SetKind(ptrace.SpanKindInternal) span.SetName(t.Name()) - span.SetStartTimestamp(pdata.NewTimestampFromTime(now)) - span.SetEndTimestamp(pdata.NewTimestampFromTime(now.Add(time.Second))) - span.SetSpanID(pdata.NewSpanID([8]byte{0, 1, 2, 3, 4, 5, 6, 7})) - span.SetParentSpanID(pdata.NewSpanID([8]byte{8, 9, 10, 11, 12, 13, 14})) + span.SetStartTimestamp(pcommon.NewTimestampFromTime(now)) + span.SetEndTimestamp(pcommon.NewTimestampFromTime(now.Add(time.Second))) + span.SetSpanID(pcommon.NewSpanID([8]byte{0, 1, 2, 3, 4, 5, 6, 7})) + span.SetParentSpanID(pcommon.NewSpanID([8]byte{8, 9, 10, 11, 12, 13, 14})) marshaler, ok := tracesMarshalers()["otlp_json"] require.True(t, ok, "Must have otlp json marshaller") @@ -125,7 +126,7 @@ func TestOTLPTracesJsonMarshaling(t *testing.T) { "spanId": "0001020304050607", "parentSpanId": "08090a0b0c0d0e00", "name": t.Name(), - "kind": pdata.SpanKindInternal.String(), + "kind": ptrace.SpanKindInternal.String(), "startTimeUnixNano": fmt.Sprint(now.UnixNano()), "endTimeUnixNano": fmt.Sprint(now.Add(time.Second).UnixNano()), "status": map[string]interface{}{}, diff --git a/exporter/kafkaexporter/pdata_marshaler.go b/exporter/kafkaexporter/pdata_marshaler.go index a1f24e8a2500..2fec31e28585 100644 --- a/exporter/kafkaexporter/pdata_marshaler.go +++ b/exporter/kafkaexporter/pdata_marshaler.go @@ -16,15 +16,17 @@ package kafkaexporter // import "github.com/open-telemetry/opentelemetry-collect import ( "github.com/Shopify/sarama" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" ) type pdataLogsMarshaler struct { - marshaler pdata.LogsMarshaler + marshaler plog.Marshaler encoding string } -func (p pdataLogsMarshaler) Marshal(ld pdata.Logs, topic string) ([]*sarama.ProducerMessage, error) { +func (p pdataLogsMarshaler) Marshal(ld plog.Logs, topic string) ([]*sarama.ProducerMessage, error) { bts, err := p.marshaler.MarshalLogs(ld) if err != nil { return nil, err @@ -41,7 +43,7 @@ func (p pdataLogsMarshaler) Encoding() string { return p.encoding } -func newPdataLogsMarshaler(marshaler pdata.LogsMarshaler, encoding string) LogsMarshaler { +func newPdataLogsMarshaler(marshaler plog.Marshaler, encoding string) LogsMarshaler { return pdataLogsMarshaler{ marshaler: marshaler, encoding: encoding, @@ -49,11 +51,11 @@ func newPdataLogsMarshaler(marshaler pdata.LogsMarshaler, encoding string) LogsM } type pdataMetricsMarshaler struct { - marshaler pdata.MetricsMarshaler + marshaler pmetric.Marshaler encoding string } -func (p pdataMetricsMarshaler) Marshal(ld pdata.Metrics, topic string) ([]*sarama.ProducerMessage, error) { +func (p pdataMetricsMarshaler) Marshal(ld pmetric.Metrics, topic string) ([]*sarama.ProducerMessage, error) { bts, err := p.marshaler.MarshalMetrics(ld) if err != nil { return nil, err @@ -70,7 +72,7 @@ func (p pdataMetricsMarshaler) Encoding() string { return p.encoding } -func newPdataMetricsMarshaler(marshaler 
pdata.MetricsMarshaler, encoding string) MetricsMarshaler { +func newPdataMetricsMarshaler(marshaler pmetric.Marshaler, encoding string) MetricsMarshaler { return pdataMetricsMarshaler{ marshaler: marshaler, encoding: encoding, @@ -78,11 +80,11 @@ func newPdataMetricsMarshaler(marshaler pdata.MetricsMarshaler, encoding string) } type pdataTracesMarshaler struct { - marshaler pdata.TracesMarshaler + marshaler ptrace.Marshaler encoding string } -func (p pdataTracesMarshaler) Marshal(td pdata.Traces, topic string) ([]*sarama.ProducerMessage, error) { +func (p pdataTracesMarshaler) Marshal(td ptrace.Traces, topic string) ([]*sarama.ProducerMessage, error) { bts, err := p.marshaler.MarshalTraces(td) if err != nil { return nil, err @@ -99,7 +101,7 @@ func (p pdataTracesMarshaler) Encoding() string { return p.encoding } -func newPdataTracesMarshaler(marshaler pdata.TracesMarshaler, encoding string) TracesMarshaler { +func newPdataTracesMarshaler(marshaler ptrace.Marshaler, encoding string) TracesMarshaler { return pdataTracesMarshaler{ marshaler: marshaler, encoding: encoding, diff --git a/exporter/loadbalancingexporter/consistent_hashing.go b/exporter/loadbalancingexporter/consistent_hashing.go index 48972aeae7dd..ed94c3bb4d26 100644 --- a/exporter/loadbalancingexporter/consistent_hashing.go +++ b/exporter/loadbalancingexporter/consistent_hashing.go @@ -18,7 +18,7 @@ import ( "hash/crc32" "sort" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" ) const maxPositions uint32 = 36000 // 360 degrees with two decimal places @@ -49,7 +49,7 @@ func newHashRing(endpoints []string) *hashRing { } // endpointFor calculates which backend is responsible for the given traceID -func (h *hashRing) endpointFor(traceID pdata.TraceID) string { +func (h *hashRing) endpointFor(traceID pcommon.TraceID) string { b := traceID.Bytes() hasher := crc32.NewIEEE() hasher.Write(b[:]) diff --git a/exporter/loadbalancingexporter/consistent_hashing_test.go b/exporter/loadbalancingexporter/consistent_hashing_test.go index 71b8d791a062..ee8fafa645cf 100644 --- a/exporter/loadbalancingexporter/consistent_hashing_test.go +++ b/exporter/loadbalancingexporter/consistent_hashing_test.go @@ -19,7 +19,7 @@ import ( "testing" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" ) func TestNewHashRing(t *testing.T) { @@ -39,12 +39,12 @@ func TestEndpointFor(t *testing.T) { ring := newHashRing(endpoints) for _, tt := range []struct { - traceID pdata.TraceID + traceID pcommon.TraceID expected string }{ // check that we are indeed alternating endpoints for different inputs - {pdata.NewTraceID([16]byte{1, 2, 0, 0}), "endpoint-1"}, - {pdata.NewTraceID([16]byte{128, 128, 0, 0}), "endpoint-2"}, + {pcommon.NewTraceID([16]byte{1, 2, 0, 0}), "endpoint-1"}, + {pcommon.NewTraceID([16]byte{128, 128, 0, 0}), "endpoint-2"}, } { t.Run(fmt.Sprintf("Endpoint for traceID %s", tt.traceID.HexString()), func(t *testing.T) { // test diff --git a/exporter/loadbalancingexporter/go.mod b/exporter/loadbalancingexporter/go.mod index ee03e87c84ce..a43f234e3215 100644 --- a/exporter/loadbalancingexporter/go.mod +++ b/exporter/loadbalancingexporter/go.mod @@ -6,15 +6,15 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.48.0 github.com/stretchr/testify v1.7.1 go.opencensus.io v0.23.0 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + 
go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/multierr v1.8.0 go.uber.org/zap v1.21.0 ) require ( cloud.google.com/go/compute v1.5.0 // indirect - github.com/cenkalti/backoff/v4 v4.1.2 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/fsnotify/fsnotify v1.5.1 // indirect github.com/go-logr/logr v1.2.3 // indirect @@ -23,7 +23,7 @@ require ( github.com/golang/protobuf v1.5.2 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/klauspost/compress v1.15.1 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect @@ -31,13 +31,12 @@ require ( github.com/pelletier/go-toml v1.9.4 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect - golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect golang.org/x/sys v0.0.0-20220209214540-3681064d5158 // indirect golang.org/x/text v0.3.7 // indirect @@ -50,3 +49,5 @@ require ( ) replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal => ../../pkg/batchpersignal + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/exporter/loadbalancingexporter/go.sum b/exporter/loadbalancingexporter/go.sum index 823618a017d3..0336dbd39020 100644 --- a/exporter/loadbalancingexporter/go.sum +++ b/exporter/loadbalancingexporter/go.sum @@ -69,8 +69,8 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= -github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -197,7 +197,6 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+ 
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -235,8 +234,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/klauspost/compress v1.14.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= @@ -293,8 +292,6 @@ github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFo github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/schollz/progressbar/v2 v2.13.2/go.mod h1:6YZjqdthH6SCZKv2rqGryrxPtfmRB/DWZxSMfCXPyD8= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -319,10 +316,10 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod 
h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0/go.mod h1:SY9qHHUES6W3oZnO1H2W8NvsSovIoXRg/A1AH9px8+I= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= @@ -331,7 +328,7 @@ go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.1/go.mod h1:RkFRM1m0puWIq10oxImnGEduNBzxiN7TXluRBtE+5j0= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= @@ -422,8 +419,9 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= diff --git a/exporter/loadbalancingexporter/loadbalancer.go b/exporter/loadbalancingexporter/loadbalancer.go index 1b8432b74280..294babd938f0 100644 --- a/exporter/loadbalancingexporter/loadbalancer.go +++ b/exporter/loadbalancingexporter/loadbalancer.go @@ -23,7 +23,7 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" "go.uber.org/zap" ) @@ -42,7 +42,7 @@ type componentFactory func(ctx context.Context, endpoint string) (component.Expo type loadBalancer interface { component.Component - Endpoint(traceID pdata.TraceID) string + Endpoint(traceID pcommon.TraceID) string Exporter(endpoint string) (component.Exporter, error) } @@ -173,7 +173,7 @@ func (lb *loadBalancerImp) Shutdown(context.Context) error { return nil } -func (lb *loadBalancerImp) 
Endpoint(traceID pdata.TraceID) string { +func (lb *loadBalancerImp) Endpoint(traceID pcommon.TraceID) string { lb.updateLock.RLock() defer lb.updateLock.RUnlock() diff --git a/exporter/loadbalancingexporter/loadbalancer_test.go b/exporter/loadbalancingexporter/loadbalancer_test.go index c12093e87349..f890ee79a233 100644 --- a/exporter/loadbalancingexporter/loadbalancer_test.go +++ b/exporter/loadbalancingexporter/loadbalancer_test.go @@ -25,7 +25,7 @@ import ( "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/exporter/otlpexporter" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" ) func TestNewLoadBalancerNoResolver(t *testing.T) { @@ -341,7 +341,7 @@ func TestFailedExporterInRing(t *testing.T) { // test // this trace ID will reach the endpoint-2 -- see the consistent hashing tests for more info - _, err = p.Exporter(p.Endpoint(pdata.NewTraceID([16]byte{128, 128, 0, 0}))) + _, err = p.Exporter(p.Endpoint(pcommon.NewTraceID([16]byte{128, 128, 0, 0}))) // verify assert.Error(t, err) diff --git a/exporter/loadbalancingexporter/log_exporter.go b/exporter/loadbalancingexporter/log_exporter.go index c14e085b5ea0..9711826e41a7 100644 --- a/exporter/loadbalancingexporter/log_exporter.go +++ b/exporter/loadbalancingexporter/log_exporter.go @@ -27,7 +27,8 @@ import ( "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/exporter/otlpexporter" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" "go.uber.org/multierr" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal" @@ -73,7 +74,7 @@ func (e *logExporterImp) Shutdown(context.Context) error { return nil } -func (e *logExporterImp) ConsumeLogs(ctx context.Context, ld pdata.Logs) error { +func (e *logExporterImp) ConsumeLogs(ctx context.Context, ld plog.Logs) error { var errs error batches := batchpersignal.SplitLogs(ld) for _, batch := range batches { @@ -83,10 +84,10 @@ func (e *logExporterImp) ConsumeLogs(ctx context.Context, ld pdata.Logs) error { return errs } -func (e *logExporterImp) consumeLog(ctx context.Context, ld pdata.Logs) error { +func (e *logExporterImp) consumeLog(ctx context.Context, ld plog.Logs) error { traceID := traceIDFromLogs(ld) balancingKey := traceID - if traceID == pdata.InvalidTraceID() { + if traceID == pcommon.InvalidTraceID() { // every log may not contain a traceID // generate a random traceID as balancingKey // so the log can be routed to a random backend @@ -121,29 +122,29 @@ func (e *logExporterImp) consumeLog(ctx context.Context, ld pdata.Logs) error { return err } -func traceIDFromLogs(ld pdata.Logs) pdata.TraceID { +func traceIDFromLogs(ld plog.Logs) pcommon.TraceID { rl := ld.ResourceLogs() if rl.Len() == 0 { - return pdata.InvalidTraceID() + return pcommon.InvalidTraceID() } sl := rl.At(0).ScopeLogs() if sl.Len() == 0 { - return pdata.InvalidTraceID() + return pcommon.InvalidTraceID() } logs := sl.At(0).LogRecords() if logs.Len() == 0 { - return pdata.InvalidTraceID() + return pcommon.InvalidTraceID() } return logs.At(0).TraceID() } -func random() pdata.TraceID { +func random() pcommon.TraceID { v1 := uint8(rand.Intn(256)) v2 := uint8(rand.Intn(256)) v3 := uint8(rand.Intn(256)) v4 := uint8(rand.Intn(256)) - return pdata.NewTraceID([16]byte{v1, v2, v3, v4}) + return pcommon.NewTraceID([16]byte{v1, v2, v3, v4}) } diff 
--git a/exporter/loadbalancingexporter/log_exporter_test.go b/exporter/loadbalancingexporter/log_exporter_test.go index d1bdc7c316d4..c152fb9b6e51 100644 --- a/exporter/loadbalancingexporter/log_exporter_test.go +++ b/exporter/loadbalancingexporter/log_exporter_test.go @@ -30,7 +30,8 @@ import ( "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumertest" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" "go.uber.org/zap" ) @@ -242,8 +243,8 @@ func TestLogBatchWithTwoTraces(t *testing.T) { defer p.Shutdown(context.Background()) first := simpleLogs() - second := simpleLogWithID(pdata.NewTraceID([16]byte{2, 3, 4, 5})) - batch := pdata.NewLogs() + second := simpleLogWithID(pcommon.NewTraceID([16]byte{2, 3, 4, 5})) + batch := plog.NewLogs() firstTgt := batch.ResourceLogs().AppendEmpty() first.ResourceLogs().At(0).CopyTo(firstTgt) secondTgt := batch.ResourceLogs().AppendEmpty() @@ -260,24 +261,24 @@ func TestLogBatchWithTwoTraces(t *testing.T) { func TestNoLogsInBatch(t *testing.T) { for _, tt := range []struct { desc string - batch pdata.Logs + batch plog.Logs }{ { "no resource logs", - pdata.NewLogs(), + plog.NewLogs(), }, { "no instrumentation library logs", - func() pdata.Logs { - batch := pdata.NewLogs() + func() plog.Logs { + batch := plog.NewLogs() batch.ResourceLogs().AppendEmpty() return batch }(), }, { "no logs", - func() pdata.Logs { - batch := pdata.NewLogs() + func() plog.Logs { + batch := plog.NewLogs() batch.ResourceLogs().AppendEmpty().ScopeLogs().AppendEmpty() return batch }(), @@ -285,7 +286,7 @@ func TestNoLogsInBatch(t *testing.T) { } { t.Run(tt.desc, func(t *testing.T) { res := traceIDFromLogs(tt.batch) - assert.Equal(t, pdata.InvalidTraceID(), res) + assert.Equal(t, pcommon.InvalidTraceID(), res) }) } } @@ -388,14 +389,14 @@ func TestRollingUpdatesWhenConsumeLogs(t *testing.T) { var counter1, counter2 int64 defaultExporters := map[string]component.Exporter{ - "127.0.0.1": newMockLogsExporter(func(ctx context.Context, ld pdata.Logs) error { + "127.0.0.1": newMockLogsExporter(func(ctx context.Context, ld plog.Logs) error { atomic.AddInt64(&counter1, 1) // simulate an unreachable backend time.Sleep(10 * time.Second) return nil }, ), - "127.0.0.2": newMockLogsExporter(func(ctx context.Context, ld pdata.Logs) error { + "127.0.0.2": newMockLogsExporter(func(ctx context.Context, ld plog.Logs) error { atomic.AddInt64(&counter2, 1) return nil }, @@ -450,16 +451,16 @@ func TestRollingUpdatesWhenConsumeLogs(t *testing.T) { require.Greater(t, atomic.LoadInt64(&counter2), int64(0)) } -func randomLogs() pdata.Logs { +func randomLogs() plog.Logs { return simpleLogWithID(random()) } -func simpleLogs() pdata.Logs { - return simpleLogWithID(pdata.NewTraceID([16]byte{1, 2, 3, 4})) +func simpleLogs() plog.Logs { + return simpleLogWithID(pcommon.NewTraceID([16]byte{1, 2, 3, 4})) } -func simpleLogWithID(id pdata.TraceID) pdata.Logs { - logs := pdata.NewLogs() +func simpleLogWithID(id pcommon.TraceID) plog.Logs { + logs := plog.NewLogs() rl := logs.ResourceLogs().AppendEmpty() sl := rl.ScopeLogs().AppendEmpty() sl.LogRecords().AppendEmpty().SetTraceID(id) @@ -467,8 +468,8 @@ func simpleLogWithID(id pdata.TraceID) pdata.Logs { return logs } -func simpleLogWithoutID() pdata.Logs { - logs := pdata.NewLogs() +func simpleLogWithoutID() plog.Logs { + logs := plog.NewLogs() rl := logs.ResourceLogs().AppendEmpty() sl := 
rl.ScopeLogs().AppendEmpty() sl.LogRecords().AppendEmpty() @@ -478,14 +479,14 @@ func simpleLogWithoutID() pdata.Logs { type mockLogsExporter struct { component.Component - ConsumeLogsFn func(ctx context.Context, ld pdata.Logs) error + ConsumeLogsFn func(ctx context.Context, ld plog.Logs) error } func (e *mockLogsExporter) Capabilities() consumer.Capabilities { return consumer.Capabilities{MutatesData: false} } -func (e *mockLogsExporter) ConsumeLogs(ctx context.Context, ld pdata.Logs) error { +func (e *mockLogsExporter) ConsumeLogs(ctx context.Context, ld plog.Logs) error { if e.ConsumeLogsFn == nil { return nil } @@ -497,7 +498,7 @@ type mockComponent struct { component.ShutdownFunc } -func newMockLogsExporter(ConsumeLogsFn func(ctx context.Context, ld pdata.Logs) error) component.LogsExporter { +func newMockLogsExporter(ConsumeLogsFn func(ctx context.Context, ld plog.Logs) error) component.LogsExporter { return &mockLogsExporter{ Component: mockComponent{}, ConsumeLogsFn: ConsumeLogsFn, @@ -507,7 +508,7 @@ func newMockLogsExporter(ConsumeLogsFn func(ctx context.Context, ld pdata.Logs) func newNopMockLogsExporter() component.LogsExporter { return &mockLogsExporter{ Component: mockComponent{}, - ConsumeLogsFn: func(ctx context.Context, ld pdata.Logs) error { + ConsumeLogsFn: func(ctx context.Context, ld plog.Logs) error { return nil }, } diff --git a/exporter/loadbalancingexporter/trace_exporter.go b/exporter/loadbalancingexporter/trace_exporter.go index d860bcdc8a43..f5e01d96ada8 100644 --- a/exporter/loadbalancingexporter/trace_exporter.go +++ b/exporter/loadbalancingexporter/trace_exporter.go @@ -27,7 +27,8 @@ import ( "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/exporter/otlpexporter" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/multierr" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal" @@ -84,7 +85,7 @@ func (e *traceExporterImp) Shutdown(context.Context) error { return nil } -func (e *traceExporterImp) ConsumeTraces(ctx context.Context, td pdata.Traces) error { +func (e *traceExporterImp) ConsumeTraces(ctx context.Context, td ptrace.Traces) error { var errs error batches := batchpersignal.SplitTraces(td) for _, batch := range batches { @@ -94,9 +95,9 @@ func (e *traceExporterImp) ConsumeTraces(ctx context.Context, td pdata.Traces) e return errs } -func (e *traceExporterImp) consumeTrace(ctx context.Context, td pdata.Traces) error { +func (e *traceExporterImp) consumeTrace(ctx context.Context, td ptrace.Traces) error { traceID := traceIDFromTraces(td) - if traceID == pdata.InvalidTraceID() { + if traceID == pcommon.InvalidTraceID() { return errNoTracesInBatch } @@ -128,20 +129,20 @@ func (e *traceExporterImp) consumeTrace(ctx context.Context, td pdata.Traces) er return err } -func traceIDFromTraces(td pdata.Traces) pdata.TraceID { +func traceIDFromTraces(td ptrace.Traces) pcommon.TraceID { rs := td.ResourceSpans() if rs.Len() == 0 { - return pdata.InvalidTraceID() + return pcommon.InvalidTraceID() } ils := rs.At(0).ScopeSpans() if ils.Len() == 0 { - return pdata.InvalidTraceID() + return pcommon.InvalidTraceID() } spans := ils.At(0).Spans() if spans.Len() == 0 { - return pdata.InvalidTraceID() + return pcommon.InvalidTraceID() } return spans.At(0).TraceID() diff --git a/exporter/loadbalancingexporter/trace_exporter_test.go 
b/exporter/loadbalancingexporter/trace_exporter_test.go index d63e1dc61349..250ac8724d10 100644 --- a/exporter/loadbalancingexporter/trace_exporter_test.go +++ b/exporter/loadbalancingexporter/trace_exporter_test.go @@ -33,7 +33,8 @@ import ( "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumertest" "go.opentelemetry.io/collector/exporter/otlpexporter" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "go.opentelemetry.io/collector/service/servicetest" "go.uber.org/zap" ) @@ -302,8 +303,8 @@ func TestBatchWithTwoTraces(t *testing.T) { lb.exporters["endpoint-1"] = newMockTracesExporter(sink.ConsumeTraces) first := simpleTraces() - second := simpleTraceWithID(pdata.NewTraceID([16]byte{2, 3, 4, 5})) - batch := pdata.NewTraces() + second := simpleTraceWithID(pcommon.NewTraceID([16]byte{2, 3, 4, 5})) + batch := ptrace.NewTraces() first.ResourceSpans().MoveAndAppendTo(batch.ResourceSpans()) second.ResourceSpans().MoveAndAppendTo(batch.ResourceSpans()) @@ -318,24 +319,24 @@ func TestBatchWithTwoTraces(t *testing.T) { func TestNoTracesInBatch(t *testing.T) { for _, tt := range []struct { desc string - batch pdata.Traces + batch ptrace.Traces }{ { "no resource spans", - pdata.NewTraces(), + ptrace.NewTraces(), }, { "no instrumentation library spans", - func() pdata.Traces { - batch := pdata.NewTraces() + func() ptrace.Traces { + batch := ptrace.NewTraces() batch.ResourceSpans().AppendEmpty() return batch }(), }, { "no spans", - func() pdata.Traces { - batch := pdata.NewTraces() + func() ptrace.Traces { + batch := ptrace.NewTraces() batch.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty() return batch }(), @@ -343,7 +344,7 @@ func TestNoTracesInBatch(t *testing.T) { } { t.Run(tt.desc, func(t *testing.T) { res := traceIDFromTraces(tt.batch) - assert.Equal(t, pdata.InvalidTraceID(), res) + assert.Equal(t, pcommon.InvalidTraceID(), res) }) } } @@ -416,14 +417,14 @@ func TestRollingUpdatesWhenConsumeTraces(t *testing.T) { var counter1, counter2 int64 defaultExporters := map[string]component.Exporter{ - "127.0.0.1": newMockTracesExporter(func(ctx context.Context, td pdata.Traces) error { + "127.0.0.1": newMockTracesExporter(func(ctx context.Context, td ptrace.Traces) error { atomic.AddInt64(&counter1, 1) // simulate an unreachable backend time.Sleep(10 * time.Second) return nil }, ), - "127.0.0.2": newMockTracesExporter(func(ctx context.Context, td pdata.Traces) error { + "127.0.0.2": newMockTracesExporter(func(ctx context.Context, td ptrace.Traces) error { atomic.AddInt64(&counter2, 1) return nil }, @@ -478,20 +479,20 @@ func TestRollingUpdatesWhenConsumeTraces(t *testing.T) { require.Greater(t, atomic.LoadInt64(&counter2), int64(0)) } -func randomTraces() pdata.Traces { +func randomTraces() ptrace.Traces { v1 := uint8(rand.Intn(256)) v2 := uint8(rand.Intn(256)) v3 := uint8(rand.Intn(256)) v4 := uint8(rand.Intn(256)) - return simpleTraceWithID(pdata.NewTraceID([16]byte{v1, v2, v3, v4})) + return simpleTraceWithID(pcommon.NewTraceID([16]byte{v1, v2, v3, v4})) } -func simpleTraces() pdata.Traces { - return simpleTraceWithID(pdata.NewTraceID([16]byte{1, 2, 3, 4})) +func simpleTraces() ptrace.Traces { + return simpleTraceWithID(pcommon.NewTraceID([16]byte{1, 2, 3, 4})) } -func simpleTraceWithID(id pdata.TraceID) pdata.Traces { - traces := pdata.NewTraces() +func simpleTraceWithID(id pcommon.TraceID) ptrace.Traces { + traces := ptrace.NewTraces() 
traces.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty().Spans().AppendEmpty().SetTraceID(id) return traces } @@ -507,10 +508,10 @@ func simpleConfig() *Config { type mockTracesExporter struct { component.Component - ConsumeTracesFn func(ctx context.Context, td pdata.Traces) error + ConsumeTracesFn func(ctx context.Context, td ptrace.Traces) error } -func newMockTracesExporter(consumeTracesFn func(ctx context.Context, td pdata.Traces) error) component.TracesExporter { +func newMockTracesExporter(consumeTracesFn func(ctx context.Context, td ptrace.Traces) error) component.TracesExporter { return &mockTracesExporter{ Component: mockComponent{}, ConsumeTracesFn: consumeTracesFn, @@ -520,7 +521,7 @@ func newMockTracesExporter(consumeTracesFn func(ctx context.Context, td pdata.Tr func newNopMockTracesExporter() component.TracesExporter { return &mockTracesExporter{ Component: mockComponent{}, - ConsumeTracesFn: func(ctx context.Context, td pdata.Traces) error { + ConsumeTracesFn: func(ctx context.Context, td ptrace.Traces) error { return nil }, } @@ -530,7 +531,7 @@ func (e *mockTracesExporter) Capabilities() consumer.Capabilities { return consumer.Capabilities{MutatesData: false} } -func (e *mockTracesExporter) ConsumeTraces(ctx context.Context, td pdata.Traces) error { +func (e *mockTracesExporter) ConsumeTraces(ctx context.Context, td ptrace.Traces) error { if e.ConsumeTracesFn == nil { return nil } diff --git a/exporter/logzioexporter/exporter.go b/exporter/logzioexporter/exporter.go index 8d32ebf64b17..5b9fc3ab55e5 100644 --- a/exporter/logzioexporter/exporter.go +++ b/exporter/logzioexporter/exporter.go @@ -24,7 +24,8 @@ import ( "github.com/logzio/jaeger-logzio/store" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/exporter/exporterhelper" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger" ) @@ -39,7 +40,7 @@ type logzioExporter struct { writer *store.LogzioSpanWriter logger hclog.Logger WriteSpanFunc func(ctx context.Context, span *model.Span) error - InternalTracesToJaegerTraces func(td pdata.Traces) ([]*model.Batch, error) + InternalTracesToJaegerTraces func(td ptrace.Traces) ([]*model.Batch, error) } func newLogzioExporter(config *Config, params component.ExporterCreateSettings) (*logzioExporter, error) { @@ -100,7 +101,7 @@ func newLogzioMetricsExporter(config *Config, set component.ExporterCreateSettin exporterhelper.WithShutdown(exporter.Shutdown)) } -func (exporter *logzioExporter) pushTraceData(ctx context.Context, traces pdata.Traces) error { +func (exporter *logzioExporter) pushTraceData(ctx context.Context, traces ptrace.Traces) error { batches, err := exporter.InternalTracesToJaegerTraces(traces) if err != nil { return err @@ -116,7 +117,7 @@ func (exporter *logzioExporter) pushTraceData(ctx context.Context, traces pdata. 
return nil } -func (exporter *logzioExporter) pushMetricsData(ctx context.Context, md pdata.Metrics) error { +func (exporter *logzioExporter) pushMetricsData(ctx context.Context, md pmetric.Metrics) error { return nil } diff --git a/exporter/logzioexporter/exporter_test.go b/exporter/logzioexporter/exporter_test.go index 1b927ff83e9b..f7dec5d6a7d9 100644 --- a/exporter/logzioexporter/exporter_test.go +++ b/exporter/logzioexporter/exporter_test.go @@ -33,8 +33,10 @@ import ( "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/config" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" ) const ( @@ -43,17 +45,17 @@ const ( testOperation = "testOperation" ) -func newTestTraces() pdata.Traces { - td := pdata.NewTraces() +func newTestTraces() ptrace.Traces { + td := ptrace.NewTraces() s := td.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty().Spans().AppendEmpty() s.SetName(testOperation) - s.SetTraceID(pdata.NewTraceID([16]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1})) - s.SetSpanID(pdata.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 2})) - s.SetKind(pdata.SpanKindServer) + s.SetTraceID(pcommon.NewTraceID([16]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1})) + s.SetSpanID(pcommon.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 2})) + s.SetKind(ptrace.SpanKindServer) return td } -func testTracesExporter(td pdata.Traces, t *testing.T, cfg *Config) { +func testTracesExporter(td ptrace.Traces, t *testing.T, cfg *Config) { params := componenttest.NewNopExporterCreateSettings() exporter, err := createTracesExporter(context.Background(), params, cfg) require.NoError(t, err) @@ -71,7 +73,7 @@ func TestNullTracesExporterConfig(tester *testing.T) { assert.Error(tester, err, "Null exporter config should produce error") } -func testMetricsExporter(md pdata.Metrics, t *testing.T, cfg *Config) { +func testMetricsExporter(md pmetric.Metrics, t *testing.T, cfg *Config) { params := componenttest.NewNopExporterCreateSettings() exporter, err := createMetricsExporter(context.Background(), params, cfg) require.NoError(t, err) @@ -100,7 +102,7 @@ func TestEmptyNode(tester *testing.T) { TracesToken: "test", Region: "eu", } - testTracesExporter(pdata.NewTraces(), tester, &cfg) + testTracesExporter(ptrace.NewTraces(), tester, &cfg) } func TestWriteSpanError(tester *testing.T) { @@ -128,7 +130,7 @@ func TestConversionTraceError(tester *testing.T) { exporter, _ := newLogzioExporter(&cfg, params) oldFunc := exporter.InternalTracesToJaegerTraces defer func() { exporter.InternalTracesToJaegerTraces = oldFunc }() - exporter.InternalTracesToJaegerTraces = func(td pdata.Traces) ([]*model.Batch, error) { + exporter.InternalTracesToJaegerTraces = func(td ptrace.Traces) ([]*model.Batch, error) { return nil, errors.New("fail") } err := exporter.pushTraceData(context.Background(), newTestTraces()) @@ -197,7 +199,7 @@ func TestPushMetricsData(tester *testing.T) { Region: "eu", CustomEndpoint: "url", } - md := pdata.NewMetrics() + md := pmetric.NewMetrics() testMetricsExporter(md, tester, &cfg) } diff --git a/exporter/logzioexporter/go.mod b/exporter/logzioexporter/go.mod index 682270ce7bba..2970db6f2f38 100644 --- a/exporter/logzioexporter/go.mod +++ b/exporter/logzioexporter/go.mod @@ -8,8 +8,9 @@ require ( github.com/logzio/jaeger-logzio 
v1.0.4 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.48.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/zap v1.21.0 ) @@ -17,17 +18,16 @@ require ( github.com/apache/thrift v0.16.0 // indirect github.com/avast/retry-go v3.0.0+incompatible // indirect github.com/beeker1121/goque v2.1.0+incompatible // indirect - github.com/cenkalti/backoff/v4 v4.1.2 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/fatih/color v1.13.0 // indirect github.com/fsnotify/fsnotify v1.5.1 // indirect github.com/go-ole/go-ole v1.2.6 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/protobuf v1.5.2 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/josharian/intern v1.0.0 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/logzio/logzio-go v1.0.3 // indirect github.com/magiconair/properties v1.8.6 // indirect github.com/mailru/easyjson v0.7.7 // indirect @@ -61,12 +61,8 @@ require ( go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect - golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 // indirect golang.org/x/text v0.3.7 // indirect - google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect - google.golang.org/grpc v1.45.0 // indirect - google.golang.org/protobuf v1.28.0 // indirect gopkg.in/ini.v1 v1.66.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect @@ -78,3 +74,5 @@ exclude github.com/StackExchange/wmi v1.2.0 replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger => ../../pkg/translator/jaeger replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal => ../../internal/coreinternal + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/exporter/logzioexporter/go.sum b/exporter/logzioexporter/go.sum index 6d10d17d5960..1560070634a4 100644 --- a/exporter/logzioexporter/go.sum +++ b/exporter/logzioexporter/go.sum @@ -107,8 +107,8 @@ github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dR github.com/bsm/sarama-cluster v2.1.13+incompatible/go.mod h1:r7ao+4tTNXvWm+VRpRJchr2kQhqxgmAp2iEX5W96gMM= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= -github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 
h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -120,11 +120,7 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= @@ -170,7 +166,6 @@ github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5y github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= @@ -417,7 +412,6 @@ github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51 github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= 
github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= @@ -520,8 +514,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.12.2/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -850,17 +844,19 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= 
go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= @@ -994,8 +990,7 @@ golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLd golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1263,7 +1258,6 @@ google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= @@ -1293,9 +1287,7 @@ google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1310,7 +1302,6 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod 
h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/exporter/lokiexporter/encode_json.go b/exporter/lokiexporter/encode_json.go index c829847c1386..4273a9ee553b 100644 --- a/exporter/lokiexporter/encode_json.go +++ b/exporter/lokiexporter/encode_json.go @@ -18,7 +18,8 @@ import ( "encoding/json" "fmt" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" ) // JSON representation of the LogRecord as described by https://developers.google.com/protocol-buffers/docs/proto3#json @@ -33,32 +34,32 @@ type lokiEntry struct { Resources map[string]interface{} `json:"resources,omitempty"` } -func serializeBody(body pdata.Value) ([]byte, error) { +func serializeBody(body pcommon.Value) ([]byte, error) { var str []byte var err error switch body.Type() { - case pdata.ValueTypeEmpty: + case pcommon.ValueTypeEmpty: // no body - case pdata.ValueTypeString: + case pcommon.ValueTypeString: str, err = json.Marshal(body.StringVal()) - case pdata.ValueTypeInt: + case pcommon.ValueTypeInt: str, err = json.Marshal(body.IntVal()) - case pdata.ValueTypeDouble: + case pcommon.ValueTypeDouble: str, err = json.Marshal(body.DoubleVal()) - case pdata.ValueTypeBool: + case pcommon.ValueTypeBool: str, err = json.Marshal(body.BoolVal()) - case pdata.ValueTypeMap: + case pcommon.ValueTypeMap: str, err = json.Marshal(body.MapVal().AsRaw()) - case pdata.ValueTypeSlice: + case pcommon.ValueTypeSlice: str, err = json.Marshal(attributeValueSliceAsRaw(body.SliceVal())) - case pdata.ValueTypeBytes: + case pcommon.ValueTypeBytes: str, err = json.Marshal(body.BytesVal()) default: @@ -67,7 +68,7 @@ func serializeBody(body pdata.Value) ([]byte, error) { return str, err } -func encodeJSON(lr pdata.LogRecord, res pdata.Resource) (string, error) { +func encodeJSON(lr plog.LogRecord, res pcommon.Resource) (string, error) { var logRecord lokiEntry var jsonRecord []byte var err error @@ -95,22 +96,22 @@ func encodeJSON(lr pdata.LogRecord, res pdata.Resource) (string, error) { } // Copied from pdata (es AttributeValueSlice) asRaw() since its not exported -func attributeValueSliceAsRaw(es pdata.Slice) []interface{} { +func attributeValueSliceAsRaw(es pcommon.Slice) []interface{} { rawSlice := make([]interface{}, 0, es.Len()) for i := 0; i < es.Len(); i++ { v := es.At(i) switch v.Type() { - case pdata.ValueTypeString: + case pcommon.ValueTypeString: rawSlice = append(rawSlice, v.StringVal()) - case pdata.ValueTypeInt: + case pcommon.ValueTypeInt: rawSlice = append(rawSlice, v.IntVal()) - case pdata.ValueTypeDouble: + case pcommon.ValueTypeDouble: rawSlice = append(rawSlice, v.DoubleVal()) - case pdata.ValueTypeBool: + case pcommon.ValueTypeBool: rawSlice = append(rawSlice, v.BoolVal()) - case pdata.ValueTypeBytes: + case pcommon.ValueTypeBytes: rawSlice = append(rawSlice, v.BytesVal()) - case pdata.ValueTypeEmpty: + case pcommon.ValueTypeEmpty: rawSlice = append(rawSlice, nil) default: rawSlice = append(rawSlice, "") diff --git a/exporter/lokiexporter/encode_json_test.go b/exporter/lokiexporter/encode_json_test.go index b2a791485692..1ed7e5dc0b32 100644 --- a/exporter/lokiexporter/encode_json_test.go +++ b/exporter/lokiexporter/encode_json_test.go @@ -18,21 +18,22 @@ import ( "testing" "github.com/stretchr/testify/assert" - 
"go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" ) -func exampleLog() (pdata.LogRecord, pdata.Resource) { +func exampleLog() (plog.LogRecord, pcommon.Resource) { - buffer := pdata.NewLogRecord() + buffer := plog.NewLogRecord() buffer.Body().SetStringVal("Example log") buffer.SetSeverityText("error") - buffer.Attributes().Insert("attr1", pdata.NewValueString("1")) - buffer.Attributes().Insert("attr2", pdata.NewValueString("2")) - buffer.SetTraceID(pdata.NewTraceID([16]byte{1, 2, 3, 4})) - buffer.SetSpanID(pdata.NewSpanID([8]byte{5, 6, 7, 8})) + buffer.Attributes().Insert("attr1", pcommon.NewValueString("1")) + buffer.Attributes().Insert("attr2", pcommon.NewValueString("2")) + buffer.SetTraceID(pcommon.NewTraceID([16]byte{1, 2, 3, 4})) + buffer.SetSpanID(pcommon.NewSpanID([8]byte{5, 6, 7, 8})) - resource := pdata.NewResource() - resource.Attributes().Insert("host.name", pdata.NewValueString("something")) + resource := pcommon.NewResource() + resource.Attributes().Insert("host.name", pcommon.NewValueString("something")) return buffer, resource } @@ -49,9 +50,9 @@ func TestConvertWithMapBody(t *testing.T) { in := `{"body":{"key1":"value","key2":"value"},"traceid":"01020304000000000000000000000000","spanid":"0506070800000000","severity":"error","attributes":{"attr1":"1","attr2":"2"},"resources":{"host.name":"something"}}` log, resource := exampleLog() - mapVal := pdata.NewValueMap() - mapVal.MapVal().Insert("key1", pdata.NewValueString("value")) - mapVal.MapVal().Insert("key2", pdata.NewValueString("value")) + mapVal := pcommon.NewValueMap() + mapVal.MapVal().Insert("key1", pcommon.NewValueString("value")) + mapVal.MapVal().Insert("key2", pcommon.NewValueString("value")) mapVal.CopyTo(log.Body()) out, err := encodeJSON(log, resource) @@ -61,14 +62,14 @@ func TestConvertWithMapBody(t *testing.T) { func TestSerializeBody(t *testing.T) { - arrayval := pdata.NewValueSlice() + arrayval := pcommon.NewValueSlice() arrayval.SliceVal().AppendEmpty().SetStringVal("a") arrayval.SliceVal().AppendEmpty().SetStringVal("b") - simplemap := pdata.NewValueMap() + simplemap := pcommon.NewValueMap() simplemap.MapVal().InsertString("key", "val") - complexmap := pdata.NewValueMap() + complexmap := pcommon.NewValueMap() complexmap.MapVal().InsertString("keystr", "val") complexmap.MapVal().InsertInt("keyint", 1) complexmap.MapVal().InsertDouble("keyint", 1) @@ -76,30 +77,30 @@ func TestSerializeBody(t *testing.T) { complexmap.MapVal().InsertNull("keynull") complexmap.MapVal().Insert("keyarr", arrayval) complexmap.MapVal().Insert("keymap", simplemap) - complexmap.MapVal().Insert("keyempty", pdata.NewValueEmpty()) + complexmap.MapVal().Insert("keyempty", pcommon.NewValueEmpty()) testcases := []struct { - input pdata.Value + input pcommon.Value expected []byte }{ { - pdata.NewValueEmpty(), + pcommon.NewValueEmpty(), nil, }, { - pdata.NewValueString("a"), + pcommon.NewValueString("a"), []byte(`"a"`), }, { - pdata.NewValueInt(1), + pcommon.NewValueInt(1), []byte(`1`), }, { - pdata.NewValueDouble(1.1), + pcommon.NewValueDouble(1.1), []byte(`1.1`), }, { - pdata.NewValueBool(true), + pcommon.NewValueBool(true), []byte(`true`), }, { @@ -115,7 +116,7 @@ func TestSerializeBody(t *testing.T) { []byte(`["a","b"]`), }, { - pdata.NewValueBytes([]byte(`abc`)), + pcommon.NewValueBytes([]byte(`abc`)), []byte(`"YWJj"`), }, } diff --git a/exporter/lokiexporter/exporter.go b/exporter/lokiexporter/exporter.go index e4c2a2d1983a..349cde8bd1d4 100644 --- 
a/exporter/lokiexporter/exporter.go +++ b/exporter/lokiexporter/exporter.go @@ -32,7 +32,8 @@ import ( "github.com/prometheus/common/model" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer/consumererror" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" "go.uber.org/multierr" "go.uber.org/zap" @@ -48,7 +49,7 @@ type lokiExporter struct { settings component.TelemetrySettings client *http.Client wg sync.WaitGroup - convert func(pdata.LogRecord, pdata.Resource) (*logproto.Entry, error) + convert func(plog.LogRecord, pcommon.Resource) (*logproto.Entry, error) } func newExporter(config *Config, settings component.TelemetrySettings) *lokiExporter { @@ -64,7 +65,7 @@ func newExporter(config *Config, settings component.TelemetrySettings) *lokiExpo return lokiexporter } -func (l *lokiExporter) pushLogData(ctx context.Context, ld pdata.Logs) error { +func (l *lokiExporter) pushLogData(ctx context.Context, ld plog.Logs) error { l.wg.Add(1) defer l.wg.Done() @@ -140,7 +141,7 @@ func (l *lokiExporter) stop(context.Context) (err error) { return nil } -func (l *lokiExporter) logDataToLoki(ld pdata.Logs) (pr *logproto.PushRequest, numDroppedLogs int) { +func (l *lokiExporter) logDataToLoki(ld plog.Logs) (pr *logproto.PushRequest, numDroppedLogs int) { var errs error streams := make(map[string]*logproto.Stream) @@ -213,7 +214,7 @@ func (l *lokiExporter) logDataToLoki(ld pdata.Logs) (pr *logproto.PushRequest, n return pr, numDroppedLogs } -func (l *lokiExporter) convertAttributesAndMerge(logAttrs pdata.Map, resourceAttrs pdata.Map) (mergedAttributes model.LabelSet, dropped bool) { +func (l *lokiExporter) convertAttributesAndMerge(logAttrs pcommon.Map, resourceAttrs pcommon.Map) (mergedAttributes model.LabelSet, dropped bool) { logRecordAttributes := l.convertAttributesToLabels(logAttrs, l.config.Labels.Attributes) resourceAttributes := l.convertAttributesToLabels(resourceAttrs, l.config.Labels.ResourceAttributes) @@ -226,7 +227,7 @@ func (l *lokiExporter) convertAttributesAndMerge(logAttrs pdata.Map, resourceAtt return mergedAttributes, false } -func (l *lokiExporter) convertAttributesToLabels(attributes pdata.Map, allowedAttributes map[string]string) model.LabelSet { +func (l *lokiExporter) convertAttributesToLabels(attributes pcommon.Map, allowedAttributes map[string]string) model.LabelSet { ls := model.LabelSet{} allowedLabels := l.config.Labels.getAttributes(allowedAttributes) @@ -234,7 +235,7 @@ func (l *lokiExporter) convertAttributesToLabels(attributes pdata.Map, allowedAt for attr, attrLabelName := range allowedLabels { av, ok := attributes.Get(attr) if ok { - if av.Type() != pdata.ValueTypeString { + if av.Type() != pcommon.ValueTypeString { l.settings.Logger.Debug("Failed to convert attribute value to Loki label value, value is not a string", zap.String("attribute", attr)) continue } @@ -245,7 +246,7 @@ func (l *lokiExporter) convertAttributesToLabels(attributes pdata.Map, allowedAt return ls } -func (l *lokiExporter) convertRecordAttributesToLabels(log pdata.LogRecord) model.LabelSet { +func (l *lokiExporter) convertRecordAttributesToLabels(log plog.LogRecord) model.LabelSet { ls := model.LabelSet{} if val, ok := l.config.Labels.RecordAttributes["traceID"]; ok { @@ -267,7 +268,7 @@ func (l *lokiExporter) convertRecordAttributesToLabels(log pdata.LogRecord) mode return ls } -func (l *lokiExporter) convertLogBodyToEntry(lr pdata.LogRecord, res pdata.Resource) 
(*logproto.Entry, error) { +func (l *lokiExporter) convertLogBodyToEntry(lr plog.LogRecord, res pcommon.Resource) (*logproto.Entry, error) { var b strings.Builder if _, ok := l.config.Labels.RecordAttributes["severity"]; !ok && len(lr.SeverityText()) > 0 { @@ -293,7 +294,7 @@ func (l *lokiExporter) convertLogBodyToEntry(lr pdata.LogRecord, res pdata.Resou // fields not added to the accept-list as part of the component's config // are added to the body, so that they can still be seen under "detected fields" - lr.Attributes().Range(func(k string, v pdata.Value) bool { + lr.Attributes().Range(func(k string, v pcommon.Value) bool { if _, found := l.config.Labels.Attributes[k]; !found { b.WriteString(k) b.WriteString("=") @@ -305,7 +306,7 @@ func (l *lokiExporter) convertLogBodyToEntry(lr pdata.LogRecord, res pdata.Resou // same for resources: include all, except the ones that are explicitly added // as part of the config, which are showing up at the top-level already - res.Attributes().Range(func(k string, v pdata.Value) bool { + res.Attributes().Range(func(k string, v pcommon.Value) bool { if _, found := l.config.Labels.ResourceAttributes[k]; !found { b.WriteString(k) b.WriteString("=") @@ -323,7 +324,7 @@ func (l *lokiExporter) convertLogBodyToEntry(lr pdata.LogRecord, res pdata.Resou }, nil } -func (l *lokiExporter) convertLogToJSONEntry(lr pdata.LogRecord, res pdata.Resource) (*logproto.Entry, error) { +func (l *lokiExporter) convertLogToJSONEntry(lr plog.LogRecord, res pcommon.Resource) (*logproto.Entry, error) { line, err := encodeJSON(lr, res) if err != nil { return nil, err diff --git a/exporter/lokiexporter/exporter_test.go b/exporter/lokiexporter/exporter_test.go index dab046f78b61..a4d01cafac81 100644 --- a/exporter/lokiexporter/exporter_test.go +++ b/exporter/lokiexporter/exporter_test.go @@ -32,8 +32,9 @@ import ( "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/config/confighttp" "go.opentelemetry.io/collector/consumer/consumererror" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/lokiexporter/internal/third_party/loki/logproto" ) @@ -54,15 +55,15 @@ var ( } ) -func createLogData(numberOfLogs int, attributes pdata.Map) pdata.Logs { - logs := pdata.NewLogs() +func createLogData(numberOfLogs int, attributes pcommon.Map) plog.Logs { + logs := plog.NewLogs() sl := logs.ResourceLogs().AppendEmpty().ScopeLogs().AppendEmpty() for i := 0; i < numberOfLogs; i++ { - ts := pdata.Timestamp(int64(i) * time.Millisecond.Nanoseconds()) + ts := pcommon.Timestamp(int64(i) * time.Millisecond.Nanoseconds()) logRecord := sl.LogRecords().AppendEmpty() logRecord.Body().SetStringVal("mylog") - attributes.Range(func(k string, v pdata.Value) bool { + attributes.Range(func(k string, v pcommon.Value) bool { logRecord.Attributes().Insert(k, v) return true }) @@ -105,9 +106,9 @@ func TestExporter_pushLogData(t *testing.T) { } } - genericGenLogsFunc := func() pdata.Logs { + genericGenLogsFunc := func() plog.Logs { return createLogData(10, - pdata.NewMapFromRaw(map[string]interface{}{ + pcommon.NewMapFromRaw(map[string]interface{}{ conventions.AttributeContainerName: "api", conventions.AttributeK8SClusterName: "local", "resource.name": "myresource", @@ -141,7 +142,7 @@ func TestExporter_pushLogData(t *testing.T) { httpResponseCode int testServer 
bool config *Config - genLogsFunc func() pdata.Logs + genLogsFunc func() plog.Logs errFunc func(err error) }{ { @@ -184,9 +185,9 @@ func TestExporter_pushLogData(t *testing.T) { config: genericConfig, httpResponseCode: http.StatusOK, testServer: true, - genLogsFunc: func() pdata.Logs { + genLogsFunc: func() plog.Logs { return createLogData(10, - pdata.NewMapFromRaw(map[string]interface{}{ + pcommon.NewMapFromRaw(map[string]interface{}{ "not.a.match": "random", })) }, @@ -201,11 +202,11 @@ func TestExporter_pushLogData(t *testing.T) { config: genericConfig, httpResponseCode: http.StatusOK, testServer: true, - genLogsFunc: func() pdata.Logs { - outLogs := pdata.NewLogs() + genLogsFunc: func() plog.Logs { + outLogs := plog.NewLogs() matchingLogs := createLogData(10, - pdata.NewMapFromRaw(map[string]interface{}{ + pcommon.NewMapFromRaw(map[string]interface{}{ conventions.AttributeContainerName: "api", conventions.AttributeK8SClusterName: "local", "severity": "debug", @@ -213,7 +214,7 @@ func TestExporter_pushLogData(t *testing.T) { matchingLogs.ResourceLogs().MoveAndAppendTo(outLogs.ResourceLogs()) nonMatchingLogs := createLogData(5, - pdata.NewMapFromRaw(map[string]interface{}{ + pcommon.NewMapFromRaw(map[string]interface{}{ "not.a.match": "random", })) nonMatchingLogs.ResourceLogs().MoveAndAppendTo(outLogs.ResourceLogs()) @@ -277,8 +278,8 @@ func TestExporter_logDataToLoki(t *testing.T) { require.NoError(t, err) t.Run("with attributes that match config", func(t *testing.T) { - logs := pdata.NewLogs() - ts := pdata.Timestamp(int64(1) * time.Millisecond.Nanoseconds()) + logs := plog.NewLogs() + ts := pcommon.Timestamp(int64(1) * time.Millisecond.Nanoseconds()) lr := logs.ResourceLogs().AppendEmpty().ScopeLogs().AppendEmpty().LogRecords().AppendEmpty() lr.Body().SetStringVal("log message") lr.Attributes().InsertString("not.in.config", "not allowed") @@ -291,8 +292,8 @@ func TestExporter_logDataToLoki(t *testing.T) { }) t.Run("with partial attributes that match config", func(t *testing.T) { - logs := pdata.NewLogs() - ts := pdata.Timestamp(int64(1) * time.Millisecond.Nanoseconds()) + logs := plog.NewLogs() + ts := pcommon.Timestamp(int64(1) * time.Millisecond.Nanoseconds()) lr := logs.ResourceLogs().AppendEmpty().ScopeLogs().AppendEmpty().LogRecords().AppendEmpty() lr.Body().SetStringVal("log message") lr.Attributes().InsertString(conventions.AttributeContainerName, "mycontainer") @@ -307,8 +308,8 @@ func TestExporter_logDataToLoki(t *testing.T) { }) t.Run("with multiple logs and same attributes", func(t *testing.T) { - logs := pdata.NewLogs() - ts := pdata.Timestamp(int64(1) * time.Millisecond.Nanoseconds()) + logs := plog.NewLogs() + ts := pcommon.Timestamp(int64(1) * time.Millisecond.Nanoseconds()) sl := logs.ResourceLogs().AppendEmpty().ScopeLogs().AppendEmpty() lr1 := sl.LogRecords().AppendEmpty() lr1.Body().SetStringVal("log message 1") @@ -332,8 +333,8 @@ func TestExporter_logDataToLoki(t *testing.T) { }) t.Run("with multiple logs and different attributes", func(t *testing.T) { - logs := pdata.NewLogs() - ts := pdata.Timestamp(int64(1) * time.Millisecond.Nanoseconds()) + logs := plog.NewLogs() + ts := pcommon.Timestamp(int64(1) * time.Millisecond.Nanoseconds()) sl := logs.ResourceLogs().AppendEmpty().ScopeLogs().AppendEmpty() lr1 := sl.LogRecords().AppendEmpty() @@ -359,8 +360,8 @@ func TestExporter_logDataToLoki(t *testing.T) { }) t.Run("with attributes and resource attributes that match config", func(t *testing.T) { - logs := pdata.NewLogs() - ts := pdata.Timestamp(int64(1) * 
time.Millisecond.Nanoseconds()) + logs := plog.NewLogs() + ts := pcommon.Timestamp(int64(1) * time.Millisecond.Nanoseconds()) lr := logs.ResourceLogs().AppendEmpty() lr.Resource().Attributes().InsertString("not.in.config", "not allowed") @@ -376,8 +377,8 @@ func TestExporter_logDataToLoki(t *testing.T) { }) t.Run("with attributes and resource attributes", func(t *testing.T) { - logs := pdata.NewLogs() - ts := pdata.Timestamp(int64(1) * time.Millisecond.Nanoseconds()) + logs := plog.NewLogs() + ts := pcommon.Timestamp(int64(1) * time.Millisecond.Nanoseconds()) lr := logs.ResourceLogs().AppendEmpty() lr.Resource().Attributes().InsertString("resource.name", "myresource") @@ -419,11 +420,11 @@ func TestExporter_convertAttributesToLabels(t *testing.T) { require.NoError(t, err) t.Run("with attributes that match", func(t *testing.T) { - am := pdata.NewMap() + am := pcommon.NewMap() am.InsertString(conventions.AttributeContainerName, "mycontainer") am.InsertString(conventions.AttributeK8SClusterName, "mycluster") am.InsertString("severity", "debug") - ram := pdata.NewMap() + ram := pcommon.NewMap() ram.InsertString("resource.name", "myresource") // this should overwrite log attribute of the same name ram.InsertString("severity", "info") @@ -439,52 +440,52 @@ func TestExporter_convertAttributesToLabels(t *testing.T) { }) t.Run("with attribute matches and the value is a boolean", func(t *testing.T) { - am := pdata.NewMap() + am := pcommon.NewMap() am.InsertBool("severity", false) - ram := pdata.NewMap() + ram := pcommon.NewMap() ls, _ := exp.convertAttributesAndMerge(am, ram) require.Nil(t, ls) }) t.Run("with attribute that matches and the value is a double", func(t *testing.T) { - am := pdata.NewMap() + am := pcommon.NewMap() am.InsertDouble("severity", float64(0)) - ram := pdata.NewMap() + ram := pcommon.NewMap() ls, _ := exp.convertAttributesAndMerge(am, ram) require.Nil(t, ls) }) t.Run("with attribute that matches and the value is an int", func(t *testing.T) { - am := pdata.NewMap() + am := pcommon.NewMap() am.InsertInt("severity", 0) - ram := pdata.NewMap() + ram := pcommon.NewMap() ls, _ := exp.convertAttributesAndMerge(am, ram) require.Nil(t, ls) }) t.Run("with attribute that matches and the value is null", func(t *testing.T) { - am := pdata.NewMap() + am := pcommon.NewMap() am.InsertNull("severity") - ram := pdata.NewMap() + ram := pcommon.NewMap() ls, _ := exp.convertAttributesAndMerge(am, ram) require.Nil(t, ls) }) } func TestExporter_convertLogBodyToEntry(t *testing.T) { - res := pdata.NewResource() - res.Attributes().Insert("host.name", pdata.NewValueString("something")) - res.Attributes().Insert("pod.name", pdata.NewValueString("something123")) + res := pcommon.NewResource() + res.Attributes().Insert("host.name", pcommon.NewValueString("something")) + res.Attributes().Insert("pod.name", pcommon.NewValueString("something123")) - lr := pdata.NewLogRecord() + lr := plog.NewLogRecord() lr.Body().SetStringVal("Payment succeeded") - lr.SetTraceID(pdata.NewTraceID([16]byte{1, 2, 3, 4})) - lr.SetSpanID(pdata.NewSpanID([8]byte{5, 6, 7, 8})) + lr.SetTraceID(pcommon.NewTraceID([16]byte{1, 2, 3, 4})) + lr.SetSpanID(pcommon.NewSpanID([8]byte{5, 6, 7, 8})) lr.SetSeverityText("DEBUG") - lr.SetSeverityNumber(pdata.SeverityNumberDEBUG) - lr.Attributes().Insert("payment_method", pdata.NewValueString("credit_card")) + lr.SetSeverityNumber(plog.SeverityNumberDEBUG) + lr.Attributes().Insert("payment_method", pcommon.NewValueString("credit_card")) - ts := pdata.Timestamp(int64(1) * 
time.Millisecond.Nanoseconds()) + ts := pcommon.Timestamp(int64(1) * time.Millisecond.Nanoseconds()) lr.SetTimestamp(ts) exp := newExporter(&Config{ @@ -593,12 +594,12 @@ func TestExporter_stopAlwaysReturnsNil(t *testing.T) { } func TestExporter_convertLogtoJSONEntry(t *testing.T) { - ts := pdata.Timestamp(int64(1) * time.Millisecond.Nanoseconds()) - lr := pdata.NewLogRecord() + ts := pcommon.Timestamp(int64(1) * time.Millisecond.Nanoseconds()) + lr := plog.NewLogRecord() lr.Body().SetStringVal("log message") lr.SetTimestamp(ts) - res := pdata.NewResource() - res.Attributes().Insert("host.name", pdata.NewValueString("something")) + res := pcommon.NewResource() + res.Attributes().Insert("host.name", pcommon.NewValueString("something")) exp := newExporter(&Config{}, componenttest.NewNopTelemetrySettings()) entry, err := exp.convertLogToJSONEntry(lr, res) @@ -614,14 +615,14 @@ func TestExporter_convertLogtoJSONEntry(t *testing.T) { func TestConvertRecordAttributesToLabels(t *testing.T) { testCases := []struct { desc string - lr pdata.LogRecord + lr plog.LogRecord expected model.LabelSet }{ { desc: "traceID", - lr: func() pdata.LogRecord { - lr := pdata.NewLogRecord() - lr.SetTraceID(pdata.NewTraceID([16]byte{1, 2, 3, 4})) + lr: func() plog.LogRecord { + lr := plog.NewLogRecord() + lr.SetTraceID(pcommon.NewTraceID([16]byte{1, 2, 3, 4})) return lr }(), expected: func() model.LabelSet { @@ -632,9 +633,9 @@ func TestConvertRecordAttributesToLabels(t *testing.T) { }, { desc: "spanID", - lr: func() pdata.LogRecord { - lr := pdata.NewLogRecord() - lr.SetSpanID(pdata.NewSpanID([8]byte{1, 2, 3, 4})) + lr: func() plog.LogRecord { + lr := plog.NewLogRecord() + lr.SetSpanID(pcommon.NewSpanID([8]byte{1, 2, 3, 4})) return lr }(), expected: func() model.LabelSet { @@ -645,8 +646,8 @@ func TestConvertRecordAttributesToLabels(t *testing.T) { }, { desc: "severity", - lr: func() pdata.LogRecord { - lr := pdata.NewLogRecord() + lr: func() plog.LogRecord { + lr := plog.NewLogRecord() lr.SetSeverityText("DEBUG") return lr }(), @@ -658,14 +659,14 @@ func TestConvertRecordAttributesToLabels(t *testing.T) { }, { desc: "severityN", - lr: func() pdata.LogRecord { - lr := pdata.NewLogRecord() - lr.SetSeverityNumber(pdata.SeverityNumberDEBUG) + lr: func() plog.LogRecord { + lr := plog.NewLogRecord() + lr.SetSeverityNumber(plog.SeverityNumberDEBUG) return lr }(), expected: func() model.LabelSet { ls := model.LabelSet{} - ls[model.LabelName("severityN")] = model.LabelValue(pdata.SeverityNumberDEBUG.String()) + ls[model.LabelName("severityN")] = model.LabelValue(plog.SeverityNumberDEBUG.String()) return ls }(), }, diff --git a/exporter/lokiexporter/go.mod b/exporter/lokiexporter/go.mod index 93d99c91a92a..47816c74424c 100644 --- a/exporter/lokiexporter/go.mod +++ b/exporter/lokiexporter/go.mod @@ -9,17 +9,16 @@ require ( github.com/prometheus/common v0.33.0 github.com/prometheus/prometheus v1.8.2-0.20220111145625-076109fa1910 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 + go.uber.org/multierr v1.8.0 go.uber.org/zap v1.21.0 google.golang.org/grpc v1.45.0 - ) -require go.uber.org/multierr v1.8.0 - require ( - github.com/cenkalti/backoff/v4 v4.1.2 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 
// indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/felixge/httpsnoop v1.0.2 // indirect @@ -27,7 +26,7 @@ require ( github.com/go-logr/stdr v1.2.2 // indirect github.com/golang/protobuf v1.5.2 // indirect github.com/klauspost/compress v1.15.1 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/pretty v0.3.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect @@ -36,7 +35,6 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rs/cors v1.8.2 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect @@ -44,7 +42,7 @@ require ( go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect + golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 // indirect golang.org/x/text v0.3.7 // indirect google.golang.org/genproto v0.0.0-20211223182754-3ac035c7e7cb // indirect google.golang.org/protobuf v1.28.0 // indirect @@ -53,3 +51,5 @@ require ( ) replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/common => ../../internal/common + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/exporter/lokiexporter/go.sum b/exporter/lokiexporter/go.sum index 8644da40f637..8079dd8f418b 100644 --- a/exporter/lokiexporter/go.sum +++ b/exporter/lokiexporter/go.sum @@ -209,8 +209,8 @@ github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n github.com/cenkalti/backoff v0.0.0-20181003080854-62661b46c409/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= -github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM= @@ -716,7 +716,6 @@ github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.8.0 
h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= @@ -862,8 +861,8 @@ github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47e github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -1191,8 +1190,6 @@ github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTd github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= @@ -1310,10 +1307,12 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= 
+go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0/go.mod h1:PFmBsWbldL1kiWZk9+0LBZz2brhByaGsvp6pRICMlPE= @@ -1324,7 +1323,7 @@ go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1 go.opentelemetry.io/otel/metric v0.28.0/go.mod h1:TrzsfQAmQaB1PDcdhBauLMk7nyyg9hm+GoQq/ekE9Iw= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.0/go.mod h1:qs7BrU5cZ8dXQHBGxHMOxwME/27YH2qEp4/+tZLLwJE= go.opentelemetry.io/otel/trace v1.6.1/go.mod h1:RkFRM1m0puWIq10oxImnGEduNBzxiN7TXluRBtE+5j0= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= @@ -1649,8 +1648,9 @@ golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= diff --git a/exporter/newrelicexporter/go.mod b/exporter/newrelicexporter/go.mod index f2193543a7a2..ba9d3ba4f32d 100644 --- a/exporter/newrelicexporter/go.mod +++ b/exporter/newrelicexporter/go.mod @@ -9,8 +9,9 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus v0.48.0 github.com/stretchr/testify v1.7.1 go.opencensus.io v0.23.0 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/multierr v1.8.0 go.uber.org/zap v1.21.0 google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa @@ 
-19,24 +20,23 @@ require ( ) require ( - github.com/cenkalti/backoff/v4 v4.1.2 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.48.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect - golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect + golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 // indirect golang.org/x/text v0.3.7 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect @@ -45,3 +45,5 @@ require ( replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus => ../../pkg/translator/opencensus replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal => ../../internal/coreinternal + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/exporter/newrelicexporter/go.sum b/exporter/newrelicexporter/go.sum index 3dd5d1e480cc..fec8233be143 100644 --- a/exporter/newrelicexporter/go.sum +++ b/exporter/newrelicexporter/go.sum @@ -18,8 +18,8 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= -github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.3.0 h1:t/LhUZLVitR1Ow2YOnduCsavhwFUklBMoGVYUCqmCqk= github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -92,7 +92,6 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= 
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -124,8 +123,8 @@ github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqx github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -170,8 +169,6 @@ github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -187,17 +184,19 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model 
v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= @@ -234,8 +233,8 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -259,8 +258,8 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= diff --git a/exporter/newrelicexporter/metrics.go b/exporter/newrelicexporter/metrics.go index e4b507558cd9..6071ac290d4c 100644 --- a/exporter/newrelicexporter/metrics.go +++ b/exporter/newrelicexporter/metrics.go @@ -23,7 +23,8 @@ import ( "go.opencensus.io/stats" "go.opencensus.io/stats/view" "go.opencensus.io/tag" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/multierr" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" @@ -96,8 +97,8 @@ func buildView(tagKeys []tag.Key, m stats.Measure, a *view.Aggregation) *view.Vi } type metricStatsKey struct { - MetricType pdata.MetricDataType - MetricTemporality pdata.MetricAggregationTemporality + MetricType pmetric.MetricDataType + MetricTemporality pmetric.MetricAggregationTemporality } type spanStatsKey struct { @@ -130,7 +131,7 @@ func (al attributeLocation) String() string { type attributeStatsKey struct { location attributeLocation - attributeType pdata.ValueType + attributeType pcommon.ValueType } type exportMetadata struct { diff --git a/exporter/newrelicexporter/metrics_test.go b/exporter/newrelicexporter/metrics_test.go index 7e68b78992ef..11d88ea68fec 100644 --- a/exporter/newrelicexporter/metrics_test.go +++ b/exporter/newrelicexporter/metrics_test.go @@ -24,7 +24,8 @@ import ( "go.opencensus.io/stats" "go.opencensus.io/stats/view" "go.opencensus.io/tag" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" ) @@ -145,10 +146,10 @@ func TestRecordMetricMetadata(t *testing.T) { dataOutputCount: 20, externalDuration: 50, metricMetadataCount: map[metricStatsKey]int{ - {MetricType: pdata.MetricDataTypeSummary}: 1, - {MetricType: pdata.MetricDataTypeHistogram}: 1, - {MetricType: pdata.MetricDataTypeSum, MetricTemporality: pdata.MetricAggregationTemporalityDelta}: 2, - {MetricType: pdata.MetricDataTypeSum, MetricTemporality: pdata.MetricAggregationTemporalityCumulative}: 3, + {MetricType: pmetric.MetricDataTypeSummary}: 1, + {MetricType: pmetric.MetricDataTypeHistogram}: 1, + {MetricType: pmetric.MetricDataTypeSum, MetricTemporality: pmetric.MetricAggregationTemporalityDelta}: 2, + {MetricType: pmetric.MetricDataTypeSum, MetricTemporality: pmetric.MetricAggregationTemporalityCumulative}: 3, }, } @@ -293,13 +294,13 @@ func TestRecordAttributeMetadata(t *testing.T) { dataOutputCount: 20, externalDuration: 50, attributeMetadataCount: map[attributeStatsKey]int{ - {attributeType: pdata.ValueTypeSlice, location: attributeLocationResource}: 1, - {attributeType: pdata.ValueTypeBool, location: attributeLocationSpan}: 1, - {attributeType: pdata.ValueTypeMap, location: attributeLocationSpanEvent}: 1, - {attributeType: pdata.ValueTypeDouble, location: attributeLocationLog}: 1, - {attributeType: pdata.ValueTypeInt, location: attributeLocationResource}: 1, - {attributeType: pdata.ValueTypeEmpty, location: attributeLocationSpan}: 1, - {attributeType: pdata.ValueTypeString, location: attributeLocationSpanEvent}: 1, + {attributeType: pcommon.ValueTypeSlice, location: attributeLocationResource}: 1, + {attributeType: pcommon.ValueTypeBool, location: attributeLocationSpan}: 1, + {attributeType: pcommon.ValueTypeMap, location: 
attributeLocationSpanEvent}: 1, + {attributeType: pcommon.ValueTypeDouble, location: attributeLocationLog}: 1, + {attributeType: pcommon.ValueTypeInt, location: attributeLocationResource}: 1, + {attributeType: pcommon.ValueTypeEmpty, location: attributeLocationSpan}: 1, + {attributeType: pcommon.ValueTypeString, location: attributeLocationSpanEvent}: 1, }, } diff --git a/exporter/newrelicexporter/newrelic.go b/exporter/newrelicexporter/newrelic.go index 4d25608631ee..8c79ce3c884e 100644 --- a/exporter/newrelicexporter/newrelic.go +++ b/exporter/newrelicexporter/newrelic.go @@ -25,7 +25,9 @@ import ( "github.com/newrelic/newrelic-telemetry-sdk-go/telemetry" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/multierr" "go.uber.org/zap" "google.golang.org/grpc/metadata" @@ -121,14 +123,14 @@ func (e exporter) extractAPIKeyFromHeader(ctx context.Context) string { return values[0] } -func (e exporter) pushTraceData(ctx context.Context, td pdata.Traces) (outputErr error) { +func (e exporter) pushTraceData(ctx context.Context, td ptrace.Traces) (outputErr error) { details := newTraceMetadata(ctx) details.dataInputCount = td.SpanCount() builder := func() ([]telemetry.Batch, error) { return e.buildTraceBatch(&details, td) } return e.export(ctx, &details, builder) } -func (e exporter) buildTraceBatch(details *exportMetadata, td pdata.Traces) ([]telemetry.Batch, error) { +func (e exporter) buildTraceBatch(details *exportMetadata, td ptrace.Traces) ([]telemetry.Batch, error) { var errs error transform := newTransformer(e.logger, e.buildInfo, details) @@ -165,7 +167,7 @@ func (e exporter) buildTraceBatch(details *exportMetadata, td pdata.Traces) ([]t return batches, errs } -func calcSpanBatches(td pdata.Traces) int { +func calcSpanBatches(td ptrace.Traces) int { rss := td.ResourceSpans() batchCount := 0 for i := 0; i < rss.Len(); i++ { @@ -174,14 +176,14 @@ func calcSpanBatches(td pdata.Traces) int { return batchCount } -func (e exporter) pushLogData(ctx context.Context, ld pdata.Logs) (outputErr error) { +func (e exporter) pushLogData(ctx context.Context, ld plog.Logs) (outputErr error) { details := newLogMetadata(ctx) details.dataInputCount = ld.LogRecordCount() builder := func() ([]telemetry.Batch, error) { return e.buildLogBatch(&details, ld) } return e.export(ctx, &details, builder) } -func (e exporter) buildLogBatch(details *exportMetadata, ld pdata.Logs) ([]telemetry.Batch, error) { +func (e exporter) buildLogBatch(details *exportMetadata, ld plog.Logs) ([]telemetry.Batch, error) { var errs error transform := newTransformer(e.logger, e.buildInfo, details) @@ -218,7 +220,7 @@ func (e exporter) buildLogBatch(details *exportMetadata, ld pdata.Logs) ([]telem return batches, errs } -func calcLogBatches(ld pdata.Logs) int { +func calcLogBatches(ld plog.Logs) int { rss := ld.ResourceLogs() batchCount := 0 for i := 0; i < rss.Len(); i++ { @@ -227,14 +229,14 @@ func calcLogBatches(ld pdata.Logs) int { return batchCount } -func (e exporter) pushMetricData(ctx context.Context, md pdata.Metrics) (outputErr error) { +func (e exporter) pushMetricData(ctx context.Context, md pmetric.Metrics) (outputErr error) { details := newMetricMetadata(ctx) details.dataInputCount = md.DataPointCount() builder := func() ([]telemetry.Batch, error) { return e.buildMetricBatch(&details, md) } return e.export(ctx, &details, builder) 
} -func (e exporter) buildMetricBatch(details *exportMetadata, md pdata.Metrics) ([]telemetry.Batch, error) { +func (e exporter) buildMetricBatch(details *exportMetadata, md pmetric.Metrics) ([]telemetry.Batch, error) { var errs error transform := newTransformer(e.logger, e.buildInfo, details) @@ -278,7 +280,7 @@ func (e exporter) buildMetricBatch(details *exportMetadata, md pdata.Metrics) ([ return batches, errs } -func calcMetricBatches(md pdata.Metrics) int { +func calcMetricBatches(md pmetric.Metrics) int { rss := md.ResourceMetrics() batchCount := 0 for i := 0; i < rss.Len(); i++ { diff --git a/exporter/newrelicexporter/newrelic_test.go b/exporter/newrelicexporter/newrelic_test.go index 5d5208ac5850..3b10447326f3 100644 --- a/exporter/newrelicexporter/newrelic_test.go +++ b/exporter/newrelicexporter/newrelic_test.go @@ -34,7 +34,10 @@ import ( "go.opencensus.io/stats/view" "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/exporter/exporterhelper" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" "google.golang.org/grpc/metadata" "google.golang.org/protobuf/types/known/timestamppb" @@ -53,7 +56,7 @@ type mockConfig struct { responseHeaders map[string]string } -func runTraceMock(initialContext context.Context, ptrace pdata.Traces, cfg mockConfig) (*Mock, error) { +func runTraceMock(initialContext context.Context, ptrace ptrace.Traces, cfg mockConfig) (*Mock, error) { ctx, cancel := context.WithCancel(initialContext) defer cancel() @@ -100,7 +103,7 @@ func runTraceMock(initialContext context.Context, ptrace pdata.Traces, cfg mockC return m, nil } -func runMetricMock(initialContext context.Context, pmetrics pdata.Metrics, cfg mockConfig) (*Mock, error) { +func runMetricMock(initialContext context.Context, pmetrics pmetric.Metrics, cfg mockConfig) (*Mock, error) { ctx, cancel := context.WithCancel(initialContext) defer cancel() @@ -143,7 +146,7 @@ func runMetricMock(initialContext context.Context, pmetrics pdata.Metrics, cfg m return m, nil } -func runLogMock(initialContext context.Context, plogs pdata.Logs, cfg mockConfig) (*Mock, error) { +func runLogMock(initialContext context.Context, plogs plog.Logs, cfg mockConfig) (*Mock, error) { ctx, cancel := context.WithCancel(initialContext) defer cancel() @@ -186,7 +189,7 @@ func runLogMock(initialContext context.Context, plogs pdata.Logs, cfg mockConfig return m, nil } -func testTraceData(t *testing.T, expected []Batch, td pdata.Traces, apiKey string) { +func testTraceData(t *testing.T, expected []Batch, td ptrace.Traces, apiKey string) { ctx := context.Background() useAPIKeyHeader := apiKey != "" if useAPIKeyHeader { @@ -217,7 +220,7 @@ func testMetricData(t *testing.T, expected []Batch, md *agentmetricspb.ExportMet assert.Equal(t, expected, m.Batches) } -func testLogData(t *testing.T, expected []Batch, logs pdata.Logs, apiKey string) { +func testLogData(t *testing.T, expected []Batch, logs plog.Logs, apiKey string) { ctx := context.Background() useAPIKeyHeader := apiKey != "" if useAPIKeyHeader { @@ -269,10 +272,10 @@ func TestExportTraceWithBadPayload(t *testing.T) { func TestExportTraceWithInvalidMetadata(t *testing.T) { // TODO: Newrelic owners to investigate why passing valid data "newTestTraces()" does not return error. 
- td := pdata.NewTraces() + td := ptrace.NewTraces() s := td.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty().Spans().AppendEmpty() s.SetName("a") - s.SetSpanID(pdata.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 1})) + s.SetSpanID(pcommon.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 1})) _, err := runTraceMock(context.Background(), td, mockConfig{useAPIKeyHeader: true}) require.Error(t, err) @@ -280,10 +283,10 @@ func TestExportTraceWithInvalidMetadata(t *testing.T) { func TestExportTraceWithNoAPIKeyInMetadata(t *testing.T) { // TODO: Newrelic owners to investigate why passing valid data "newTestTraces()" does not return error. - td := pdata.NewTraces() + td := ptrace.NewTraces() s := td.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty().Spans().AppendEmpty() s.SetName("a") - s.SetSpanID(pdata.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 1})) + s.SetSpanID(pcommon.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 1})) ctx := metadata.NewIncomingContext(context.Background(), metadata.MD{}) _, err := runTraceMock(ctx, td, mockConfig{useAPIKeyHeader: true}) @@ -292,8 +295,8 @@ func TestExportTraceWithNoAPIKeyInMetadata(t *testing.T) { func TestExportTracePartialData(t *testing.T) { ptrace := newTestTraces() - ptrace.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).SetTraceID(pdata.NewTraceID([16]byte{})) - ptrace.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(1).SetSpanID(pdata.NewSpanID([8]byte{})) + ptrace.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).SetTraceID(pcommon.NewTraceID([16]byte{})) + ptrace.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(1).SetSpanID(pcommon.NewSpanID([8]byte{})) _, err := runTraceMock(context.Background(), ptrace, mockConfig{useAPIKeyHeader: false}) require.Error(t, err) @@ -302,11 +305,11 @@ func TestExportTracePartialData(t *testing.T) { } func TestExportTraceDataMinimum(t *testing.T) { - td := pdata.NewTraces() + td := ptrace.NewTraces() s1 := td.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty().Spans().AppendEmpty() s1.SetName("root") - s1.SetTraceID(pdata.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) - s1.SetSpanID(pdata.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 1})) + s1.SetTraceID(pcommon.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) + s1.SetSpanID(pcommon.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 1})) expected := []Batch{ { @@ -334,25 +337,25 @@ func TestExportTraceDataMinimum(t *testing.T) { } func TestExportTraceDataFullTrace(t *testing.T) { - td := pdata.NewTraces() + td := ptrace.NewTraces() rs := td.ResourceSpans().AppendEmpty() rs.Resource().Attributes().UpsertString("service.name", "test-service") rs.Resource().Attributes().UpsertString("resource", "R1") sps := rs.ScopeSpans().AppendEmpty().Spans() s1 := sps.AppendEmpty() s1.SetName("root") - s1.SetTraceID(pdata.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) - s1.SetSpanID(pdata.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 1})) + s1.SetTraceID(pcommon.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) + s1.SetSpanID(pcommon.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 1})) s2 := sps.AppendEmpty() s2.SetName("client") - s2.SetTraceID(pdata.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) - s2.SetSpanID(pdata.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 2})) - s2.SetParentSpanID(pdata.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 1})) + s2.SetTraceID(pcommon.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) + s2.SetSpanID(pcommon.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 2})) + 
s2.SetParentSpanID(pcommon.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 1})) s3 := sps.AppendEmpty() s3.SetName("server") - s3.SetTraceID(pdata.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) - s3.SetSpanID(pdata.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 3})) - s3.SetParentSpanID(pdata.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 2})) + s3.SetTraceID(pcommon.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) + s3.SetSpanID(pcommon.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 3})) + s3.SetParentSpanID(pcommon.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 2})) expected := []Batch{ { @@ -398,13 +401,13 @@ func TestExportTraceDataFullTrace(t *testing.T) { } func TestExportMetricUnsupported(t *testing.T) { - ms := pdata.NewMetrics() + ms := pmetric.NewMetrics() m := ms.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics().AppendEmpty() - m.SetDataType(pdata.MetricDataTypeHistogram) + m.SetDataType(pmetric.MetricDataTypeHistogram) dp := m.Histogram().DataPoints().AppendEmpty() dp.SetCount(1) dp.SetSum(1) - dp.SetTimestamp(pdata.NewTimestampFromTime(time.Now())) + dp.SetTimestamp(pcommon.NewTimestampFromTime(time.Now())) _, err := runMetricMock(context.Background(), ms, mockConfig{useAPIKeyHeader: false}) var unsupportedErr *errUnsupportedMetricType @@ -647,12 +650,12 @@ func TestExportMetricDataFull(t *testing.T) { func TestExportLogs(t *testing.T) { timestamp := time.Now() - logs := pdata.NewLogs() + logs := plog.NewLogs() rlog := logs.ResourceLogs().AppendEmpty() rlog.Resource().Attributes().InsertString("resource", "R1") rlog.Resource().Attributes().InsertString("service.name", "test-service") l := rlog.ScopeLogs().AppendEmpty().LogRecords().AppendEmpty() - l.SetTimestamp(pdata.NewTimestampFromTime(timestamp)) + l.SetTimestamp(pcommon.NewTimestampFromTime(timestamp)) l.Body().SetStringVal("log body") l.Attributes().InsertString("foo", "bar") @@ -715,11 +718,11 @@ func TestCreatesClientOptionWithVersionInUserAgent(t *testing.T) { exp, err := f.CreateTracesExporter(context.Background(), componenttest.NewNopExporterCreateSettings(), c) require.NoError(t, err) - ptrace := pdata.NewTraces() + ptrace := ptrace.NewTraces() s := ptrace.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty().Spans().AppendEmpty() s.SetName("root") - s.SetTraceID(pdata.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) - s.SetSpanID(pdata.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 1})) + s.SetTraceID(pcommon.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) + s.SetSpanID(pcommon.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 1})) err = exp.ConsumeTraces(ctx, ptrace) require.NoError(t, err) @@ -760,13 +763,13 @@ func TestBadSpanResourceGeneratesError(t *testing.T) { exp, err := f.CreateTracesExporter(context.Background(), componenttest.NewNopExporterCreateSettings(), c) require.NoError(t, err) - ptrace := pdata.NewTraces() + ptrace := ptrace.NewTraces() rs := ptrace.ResourceSpans().AppendEmpty() rs.Resource().Attributes().InsertDouble("badattribute", math.Inf(1)) s := rs.ScopeSpans().AppendEmpty().Spans().AppendEmpty() s.SetName("root") - s.SetTraceID(pdata.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) - s.SetSpanID(pdata.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 1})) + s.SetTraceID(pcommon.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) + s.SetSpanID(pcommon.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 1})) errorFromConsumeTraces := exp.ConsumeTraces(ctx, ptrace) @@ -807,7 +810,7 @@ func TestBadMetricResourceGeneratesError(t 
*testing.T) { exp, err := f.CreateMetricsExporter(context.Background(), componenttest.NewNopExporterCreateSettings(), c) require.NoError(t, err) - md := pdata.NewMetrics() + md := pmetric.NewMetrics() rm := md.ResourceMetrics().AppendEmpty() rm.Resource().Attributes().InsertDouble("badattribute", math.Inf(1)) metric := rm.ScopeMetrics().AppendEmpty().Metrics().AppendEmpty() @@ -852,7 +855,7 @@ func TestBadLogResourceGeneratesError(t *testing.T) { exp, err := f.CreateLogsExporter(context.Background(), componenttest.NewNopExporterCreateSettings(), c) require.NoError(t, err) - ld := pdata.NewLogs() + ld := plog.NewLogs() rl := ld.ResourceLogs().AppendEmpty() rl.Resource().Attributes().InsertDouble("badattribute", math.Inf(1)) rl.ScopeLogs().AppendEmpty().LogRecords().AppendEmpty() @@ -901,11 +904,11 @@ func TestFailureToRecordMetricsDoesNotAffectExportingData(t *testing.T) { exp, err := f.CreateTracesExporter(context.Background(), componenttest.NewNopExporterCreateSettings(), c) require.NoError(t, err) - ptrace := pdata.NewTraces() + ptrace := ptrace.NewTraces() s := ptrace.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty().Spans().AppendEmpty() s.SetName("root") - s.SetTraceID(pdata.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) - s.SetSpanID(pdata.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 1})) + s.SetTraceID(pcommon.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) + s.SetSpanID(pcommon.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 1})) // Create a long string so that the user-agent will be too long and cause RecordMetric to fail b := make([]byte, 300) @@ -921,16 +924,16 @@ func TestFailureToRecordMetricsDoesNotAffectExportingData(t *testing.T) { assert.Contains(t, m.Header[http.CanonicalHeaderKey("user-agent")][0], testCollectorName) } -func newTestTraces() pdata.Traces { - td := pdata.NewTraces() +func newTestTraces() ptrace.Traces { + td := ptrace.NewTraces() sps := td.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty().Spans() s1 := sps.AppendEmpty() s1.SetName("a") - s1.SetTraceID(pdata.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) - s1.SetSpanID(pdata.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 1})) + s1.SetTraceID(pcommon.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) + s1.SetSpanID(pcommon.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 1})) s2 := sps.AppendEmpty() s2.SetName("b") - s2.SetTraceID(pdata.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) - s2.SetSpanID(pdata.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 2})) + s2.SetTraceID(pcommon.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) + s2.SetSpanID(pcommon.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 2})) return td } diff --git a/exporter/newrelicexporter/transformer.go b/exporter/newrelicexporter/transformer.go index 85c200e0d7b3..6a25e77eef07 100644 --- a/exporter/newrelicexporter/transformer.go +++ b/exporter/newrelicexporter/transformer.go @@ -24,8 +24,11 @@ import ( "github.com/newrelic/newrelic-telemetry-sdk-go/telemetry" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer/consumererror" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap" ) @@ -63,7 +66,7 @@ func newTransformer(logger *zap.Logger, buildInfo 
*component.BuildInfo, details return &transformer{logger: logger, OverrideAttributes: overrideAttributes, details: details} } -func (t *transformer) CommonAttributes(resource pdata.Resource, lib pdata.InstrumentationScope) map[string]interface{} { +func (t *transformer) CommonAttributes(resource pcommon.Resource, lib pcommon.InstrumentationScope) map[string]interface{} { resourceAttrs := resource.Attributes() commonAttrs := resourceAttrs.AsRaw() t.TrackAttributes(attributeLocationResource, resourceAttrs) @@ -87,7 +90,7 @@ var ( errInvalidTraceID = errors.New("TraceID is invalid") ) -func (t *transformer) Span(span pdata.Span) (telemetry.Span, error) { +func (t *transformer) Span(span ptrace.Span) (telemetry.Span, error) { startTime := span.StartTimestamp().AsTime() sp := telemetry.Span{ // HexString validates the IDs, it will be an empty string if invalid. @@ -117,7 +120,7 @@ func (t *transformer) Span(span pdata.Span) (telemetry.Span, error) { return sp, nil } -func (t *transformer) Log(log pdata.LogRecord) (telemetry.Log, error) { +func (t *transformer) Log(log plog.LogRecord) (telemetry.Log, error) { var message string if bodyString := log.Body().StringVal(); bodyString != "" { @@ -162,13 +165,13 @@ func (t *transformer) Log(log pdata.LogRecord) (telemetry.Log, error) { }, nil } -func (t *transformer) SpanAttributes(span pdata.Span) map[string]interface{} { +func (t *transformer) SpanAttributes(span ptrace.Span) map[string]interface{} { spanAttrs := span.Attributes() length := spanAttrs.Len() var hasStatusCode, hasStatusDesc bool s := span.Status() - if s.Code() != pdata.StatusCodeUnset { + if s.Code() != ptrace.StatusCodeUnset { hasStatusCode = true length++ if s.Message() != "" { @@ -177,7 +180,7 @@ func (t *transformer) SpanAttributes(span pdata.Span) map[string]interface{} { } } - validSpanKind := span.Kind() != pdata.SpanKindUnspecified + validSpanKind := span.Kind() != ptrace.SpanKindUnspecified if validSpanKind { length++ } @@ -218,7 +221,7 @@ func (t *transformer) SpanAttributes(span pdata.Span) map[string]interface{} { } // SpanEvents transforms the recorded events of span into New Relic tracing events. -func (t *transformer) SpanEvents(span pdata.Span) []telemetry.Event { +func (t *transformer) SpanEvents(span ptrace.Span) []telemetry.Event { length := span.Events().Len() if length == 0 { return nil @@ -254,7 +257,7 @@ func (e errUnsupportedMetricType) Error() string { return fmt.Sprintf("unsupported metric %v (%v)", e.metricName, e.metricType) } -func (t *transformer) Metric(m pdata.Metric) ([]telemetry.Metric, error) { +func (t *transformer) Metric(m pmetric.Metric) ([]telemetry.Metric, error) { var output []telemetry.Metric baseAttributes := t.BaseMetricAttributes(m) @@ -262,7 +265,7 @@ func (t *transformer) Metric(m pdata.Metric) ([]telemetry.Metric, error) { k := metricStatsKey{MetricType: dataType} switch dataType { - case pdata.MetricDataTypeGauge: + case pmetric.MetricDataTypeGauge: t.details.metricMetadataCount[k]++ // "StartTimestampUnixNano" is ignored for all data points. 
gauge := m.Gauge() @@ -273,9 +276,9 @@ func (t *transformer) Metric(m pdata.Metric) ([]telemetry.Metric, error) { var val float64 switch point.ValueType() { - case pdata.MetricValueTypeDouble: + case pmetric.MetricValueTypeDouble: val = point.DoubleVal() - case pdata.MetricValueTypeInt: + case pmetric.MetricValueTypeInt: val = float64(point.IntVal()) } attributes := t.MetricAttributes(baseAttributes, point.Attributes()) @@ -288,7 +291,7 @@ func (t *transformer) Metric(m pdata.Metric) ([]telemetry.Metric, error) { } output = append(output, nrMetric) } - case pdata.MetricDataTypeSum: + case pmetric.MetricDataTypeSum: sum := m.Sum() temporality := sum.AggregationTemporality() k.MetricTemporality = temporality @@ -301,13 +304,13 @@ func (t *transformer) Metric(m pdata.Metric) ([]telemetry.Metric, error) { attributes := t.MetricAttributes(baseAttributes, point.Attributes()) var val float64 switch point.ValueType() { - case pdata.MetricValueTypeDouble: + case pmetric.MetricValueTypeDouble: val = point.DoubleVal() - case pdata.MetricValueTypeInt: + case pmetric.MetricValueTypeInt: val = float64(point.IntVal()) } - if temporality != pdata.MetricAggregationTemporalityDelta { + if temporality != pmetric.MetricAggregationTemporalityDelta { t.logger.Debug("Converting metric to gauge where AggregationTemporality != Delta", zap.String("MetricName", m.Name()), zap.Stringer("Temporality", temporality), @@ -332,12 +335,12 @@ func (t *transformer) Metric(m pdata.Metric) ([]telemetry.Metric, error) { output = append(output, nrMetric) } } - case pdata.MetricDataTypeHistogram: + case pmetric.MetricDataTypeHistogram: hist := m.Histogram() k.MetricTemporality = hist.AggregationTemporality() t.details.metricMetadataCount[k]++ return nil, consumererror.NewPermanent(&errUnsupportedMetricType{metricType: k.MetricType.String(), metricName: m.Name(), numDataPoints: hist.DataPoints().Len()}) - case pdata.MetricDataTypeSummary: + case pmetric.MetricDataTypeSummary: t.details.metricMetadataCount[k]++ summary := m.Summary() points := summary.DataPoints() @@ -385,7 +388,7 @@ func (t *transformer) Metric(m pdata.Metric) ([]telemetry.Metric, error) { return output, nil } -func (t *transformer) BaseMetricAttributes(metric pdata.Metric) map[string]interface{} { +func (t *transformer) BaseMetricAttributes(metric pmetric.Metric) map[string]interface{} { length := 0 if metric.Unit() != "" { @@ -408,12 +411,12 @@ func (t *transformer) BaseMetricAttributes(metric pdata.Metric) map[string]inter return attrs } -func (t *transformer) MetricAttributes(baseAttributes map[string]interface{}, attrMap pdata.Map) map[string]interface{} { +func (t *transformer) MetricAttributes(baseAttributes map[string]interface{}, attrMap pcommon.Map) map[string]interface{} { rawMap := make(map[string]interface{}, len(baseAttributes)+attrMap.Len()) for k, v := range baseAttributes { rawMap[k] = v } - attrMap.Range(func(k string, v pdata.Value) bool { + attrMap.Range(func(k string, v pcommon.Value) bool { // Only include attribute if not an override attribute if _, isOverrideKey := t.OverrideAttributes[k]; !isOverrideKey { rawMap[k] = v.AsString() @@ -424,8 +427,8 @@ func (t *transformer) MetricAttributes(baseAttributes map[string]interface{}, at return rawMap } -func (t *transformer) TrackAttributes(location attributeLocation, attributeMap pdata.Map) { - attributeMap.Range(func(_ string, v pdata.Value) bool { +func (t *transformer) TrackAttributes(location attributeLocation, attributeMap pcommon.Map) { + attributeMap.Range(func(_ string, v pcommon.Value) 
bool { statsKey := attributeStatsKey{location: location, attributeType: v.Type()} t.details.attributeMetadataCount[statsKey]++ return true diff --git a/exporter/newrelicexporter/transformer_test.go b/exporter/newrelicexporter/transformer_test.go index 9f6061a0da1c..e5aa6a90f6bb 100644 --- a/exporter/newrelicexporter/transformer_test.go +++ b/exporter/newrelicexporter/transformer_test.go @@ -26,8 +26,11 @@ import ( "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer/consumererror" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap" ) @@ -37,10 +40,10 @@ func TestCommonAttributes(t *testing.T) { Version: "0.0.1", } - resource := pdata.NewResource() + resource := pcommon.NewResource() resource.Attributes().InsertString("resource", "R1") - ilm := pdata.NewInstrumentationScope() + ilm := pcommon.NewInstrumentationScope() ilm.SetName("test name") ilm.SetVersion("test version") @@ -53,7 +56,7 @@ func TestCommonAttributes(t *testing.T) { assert.Equal(t, "test version", commonAttrs[conventions.OtelLibraryVersion]) assert.Equal(t, 1, len(details.attributeMetadataCount)) - assert.Equal(t, 1, details.attributeMetadataCount[attributeStatsKey{location: attributeLocationResource, attributeType: pdata.ValueTypeString}]) + assert.Equal(t, 1, details.attributeMetadataCount[attributeStatsKey{location: attributeLocationResource, attributeType: pcommon.ValueTypeString}]) } func TestDoesNotCaptureResourceAttributeMetadata(t *testing.T) { @@ -62,9 +65,9 @@ func TestDoesNotCaptureResourceAttributeMetadata(t *testing.T) { Version: "0.0.1", } - resource := pdata.NewResource() + resource := pcommon.NewResource() - ilm := pdata.NewInstrumentationScope() + ilm := pcommon.NewInstrumentationScope() ilm.SetName("test name") ilm.SetVersion("test version") @@ -82,14 +85,14 @@ func TestCaptureSpanMetadata(t *testing.T) { tests := []struct { name string err error - spanFunc func() pdata.Span + spanFunc func() ptrace.Span wantKey spanStatsKey }{ { name: "no events or links", - spanFunc: func() pdata.Span { - s := pdata.NewSpan() - s.SetSpanID(pdata.NewSpanID([...]byte{0, 0, 0, 0, 0, 0, 0, 1})) + spanFunc: func() ptrace.Span { + s := ptrace.NewSpan() + s.SetSpanID(pcommon.NewSpanID([...]byte{0, 0, 0, 0, 0, 0, 0, 1})) s.SetName("no events or links") return s }, @@ -98,9 +101,9 @@ func TestCaptureSpanMetadata(t *testing.T) { }, { name: "has events but no links", - spanFunc: func() pdata.Span { - s := pdata.NewSpan() - s.SetTraceID(pdata.NewTraceID([...]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) + spanFunc: func() ptrace.Span { + s := ptrace.NewSpan() + s.SetTraceID(pcommon.NewTraceID([...]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) s.SetName("invalid SpanID") s.Events().AppendEmpty() return s @@ -110,10 +113,10 @@ func TestCaptureSpanMetadata(t *testing.T) { }, { name: "no events but has links", - spanFunc: func() pdata.Span { - s := pdata.NewSpan() - s.SetTraceID(pdata.NewTraceID([...]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) - s.SetSpanID(pdata.NewSpanID([...]byte{0, 0, 0, 0, 0, 0, 0, 1})) + spanFunc: func() ptrace.Span { + s := ptrace.NewSpan() + s.SetTraceID(pcommon.NewTraceID([...]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) + 
s.SetSpanID(pcommon.NewSpanID([...]byte{0, 0, 0, 0, 0, 0, 0, 1})) s.SetName("no events but has links") s.Links().AppendEmpty() return s @@ -122,11 +125,11 @@ func TestCaptureSpanMetadata(t *testing.T) { }, { name: "has events and links", - spanFunc: func() pdata.Span { - s := pdata.NewSpan() - s.SetTraceID(pdata.NewTraceID([...]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) - s.SetSpanID(pdata.NewSpanID([...]byte{0, 0, 0, 0, 0, 0, 0, 2})) - s.SetParentSpanID(pdata.NewSpanID([...]byte{0, 0, 0, 0, 0, 0, 0, 1})) + spanFunc: func() ptrace.Span { + s := ptrace.NewSpan() + s.SetTraceID(pcommon.NewTraceID([...]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) + s.SetSpanID(pcommon.NewSpanID([...]byte{0, 0, 0, 0, 0, 0, 0, 2})) + s.SetParentSpanID(pcommon.NewSpanID([...]byte{0, 0, 0, 0, 0, 0, 0, 1})) s.SetName("has events and links") s.Events().AppendEmpty() s.Links().AppendEmpty() @@ -153,10 +156,10 @@ func TestCaptureSpanAttributeMetadata(t *testing.T) { details := newTraceMetadata(context.TODO()) transform := newTransformer(zap.NewNop(), nil, &details) - s := pdata.NewSpan() - s.SetTraceID(pdata.NewTraceID([...]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) - s.SetSpanID(pdata.NewSpanID([...]byte{0, 0, 0, 0, 0, 0, 0, 2})) - s.SetParentSpanID(pdata.NewSpanID([...]byte{0, 0, 0, 0, 0, 0, 0, 1})) + s := ptrace.NewSpan() + s.SetTraceID(pcommon.NewTraceID([...]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) + s.SetSpanID(pcommon.NewSpanID([...]byte{0, 0, 0, 0, 0, 0, 0, 2})) + s.SetParentSpanID(pcommon.NewSpanID([...]byte{0, 0, 0, 0, 0, 0, 0, 1})) s.SetName("test span") se := s.Events().AppendEmpty() @@ -168,18 +171,18 @@ func TestCaptureSpanAttributeMetadata(t *testing.T) { require.NoError(t, err) assert.Equal(t, 2, len(details.attributeMetadataCount)) - assert.Equal(t, 1, details.attributeMetadataCount[attributeStatsKey{location: attributeLocationSpan, attributeType: pdata.ValueTypeInt}]) - assert.Equal(t, 1, details.attributeMetadataCount[attributeStatsKey{location: attributeLocationSpanEvent, attributeType: pdata.ValueTypeBool}]) + assert.Equal(t, 1, details.attributeMetadataCount[attributeStatsKey{location: attributeLocationSpan, attributeType: pcommon.ValueTypeInt}]) + assert.Equal(t, 1, details.attributeMetadataCount[attributeStatsKey{location: attributeLocationSpanEvent, attributeType: pcommon.ValueTypeBool}]) } func TestDoesNotCaptureSpanAttributeMetadata(t *testing.T) { details := newTraceMetadata(context.TODO()) transform := newTransformer(zap.NewNop(), nil, &details) - s := pdata.NewSpan() - s.SetTraceID(pdata.NewTraceID([...]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) - s.SetSpanID(pdata.NewSpanID([...]byte{0, 0, 0, 0, 0, 0, 0, 2})) - s.SetParentSpanID(pdata.NewSpanID([...]byte{0, 0, 0, 0, 0, 0, 0, 1})) + s := ptrace.NewSpan() + s.SetTraceID(pcommon.NewTraceID([...]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) + s.SetSpanID(pcommon.NewSpanID([...]byte{0, 0, 0, 0, 0, 0, 0, 2})) + s.SetParentSpanID(pcommon.NewSpanID([...]byte{0, 0, 0, 0, 0, 0, 0, 1})) s.SetName("test span") s.Events().AppendEmpty() @@ -197,14 +200,14 @@ func TestTransformSpan(t *testing.T) { tests := []struct { name string err error - spanFunc func() pdata.Span + spanFunc func() ptrace.Span want telemetry.Span }{ { name: "invalid TraceID", - spanFunc: func() pdata.Span { - s := pdata.NewSpan() - s.SetSpanID(pdata.NewSpanID([...]byte{0, 0, 0, 0, 0, 0, 0, 1})) + spanFunc: func() ptrace.Span { + s := ptrace.NewSpan() + s.SetSpanID(pcommon.NewSpanID([...]byte{0, 0, 0, 0, 0, 0, 0, 
1})) s.SetName("invalid TraceID") return s }, @@ -218,9 +221,9 @@ func TestTransformSpan(t *testing.T) { }, { name: "invalid SpanID", - spanFunc: func() pdata.Span { - s := pdata.NewSpan() - s.SetTraceID(pdata.NewTraceID([...]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) + spanFunc: func() ptrace.Span { + s := ptrace.NewSpan() + s.SetTraceID(pcommon.NewTraceID([...]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) s.SetName("invalid SpanID") return s }, @@ -234,10 +237,10 @@ func TestTransformSpan(t *testing.T) { }, { name: "root", - spanFunc: func() pdata.Span { - s := pdata.NewSpan() - s.SetTraceID(pdata.NewTraceID([...]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) - s.SetSpanID(pdata.NewSpanID([...]byte{0, 0, 0, 0, 0, 0, 0, 1})) + spanFunc: func() ptrace.Span { + s := ptrace.NewSpan() + s.SetTraceID(pcommon.NewTraceID([...]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) + s.SetSpanID(pcommon.NewSpanID([...]byte{0, 0, 0, 0, 0, 0, 0, 1})) s.SetName("root") return s }, @@ -252,11 +255,11 @@ func TestTransformSpan(t *testing.T) { }, { name: "client", - spanFunc: func() pdata.Span { - s := pdata.NewSpan() - s.SetTraceID(pdata.NewTraceID([...]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) - s.SetSpanID(pdata.NewSpanID([...]byte{0, 0, 0, 0, 0, 0, 0, 2})) - s.SetParentSpanID(pdata.NewSpanID([...]byte{0, 0, 0, 0, 0, 0, 0, 1})) + spanFunc: func() ptrace.Span { + s := ptrace.NewSpan() + s.SetTraceID(pcommon.NewTraceID([...]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) + s.SetSpanID(pcommon.NewSpanID([...]byte{0, 0, 0, 0, 0, 0, 0, 2})) + s.SetParentSpanID(pcommon.NewSpanID([...]byte{0, 0, 0, 0, 0, 0, 0, 1})) s.SetName("client") return s }, @@ -272,12 +275,12 @@ func TestTransformSpan(t *testing.T) { }, { name: "error code", - spanFunc: func() pdata.Span { - s := pdata.NewSpan() + spanFunc: func() ptrace.Span { + s := ptrace.NewSpan() s.SetName("error code") - s.SetTraceID(pdata.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) - s.SetSpanID(pdata.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 3})) - s.Status().SetCode(pdata.StatusCodeError) + s.SetTraceID(pcommon.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) + s.SetSpanID(pcommon.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 3})) + s.Status().SetCode(ptrace.StatusCodeError) return s }, want: telemetry.Span{ @@ -293,12 +296,12 @@ func TestTransformSpan(t *testing.T) { }, { name: "error message", - spanFunc: func() pdata.Span { - s := pdata.NewSpan() + spanFunc: func() ptrace.Span { + s := ptrace.NewSpan() s.SetName("error message") - s.SetTraceID(pdata.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) - s.SetSpanID(pdata.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 3})) - s.Status().SetCode(pdata.StatusCodeError) + s.SetTraceID(pcommon.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) + s.SetSpanID(pcommon.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 3})) + s.Status().SetCode(ptrace.StatusCodeError) s.Status().SetMessage("error message") return s }, @@ -316,11 +319,11 @@ func TestTransformSpan(t *testing.T) { }, { name: "attributes", - spanFunc: func() pdata.Span { - s := pdata.NewSpan() + spanFunc: func() ptrace.Span { + s := ptrace.NewSpan() s.SetName("attrs") - s.SetTraceID(pdata.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) - s.SetSpanID(pdata.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 4})) + s.SetTraceID(pcommon.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) + s.SetSpanID(pcommon.NewSpanID([8]byte{0, 
0, 0, 0, 0, 0, 0, 4})) s.Attributes().UpsertBool("prod", true) s.Attributes().UpsertInt("weight", 10) s.Attributes().UpsertDouble("score", 99.8) @@ -343,13 +346,13 @@ func TestTransformSpan(t *testing.T) { }, { name: "with timestamps", - spanFunc: func() pdata.Span { - s := pdata.NewSpan() - s.SetTraceID(pdata.NewTraceID([...]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) - s.SetSpanID(pdata.NewSpanID([...]byte{0, 0, 0, 0, 0, 0, 0, 5})) + spanFunc: func() ptrace.Span { + s := ptrace.NewSpan() + s.SetTraceID(pcommon.NewTraceID([...]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) + s.SetSpanID(pcommon.NewSpanID([...]byte{0, 0, 0, 0, 0, 0, 0, 5})) s.SetName("with time") - s.SetStartTimestamp(pdata.NewTimestampFromTime(now)) - s.SetEndTimestamp(pdata.NewTimestampFromTime(now.Add(time.Second * 5))) + s.SetStartTimestamp(pcommon.NewTimestampFromTime(now)) + s.SetEndTimestamp(pcommon.NewTimestampFromTime(now.Add(time.Second * 5))) return s }, want: telemetry.Span{ @@ -364,12 +367,12 @@ func TestTransformSpan(t *testing.T) { }, { name: "span kind server", - spanFunc: func() pdata.Span { - s := pdata.NewSpan() - s.SetTraceID(pdata.NewTraceID([...]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) - s.SetSpanID(pdata.NewSpanID([...]byte{0, 0, 0, 0, 0, 0, 0, 6})) + spanFunc: func() ptrace.Span { + s := ptrace.NewSpan() + s.SetTraceID(pcommon.NewTraceID([...]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) + s.SetSpanID(pcommon.NewSpanID([...]byte{0, 0, 0, 0, 0, 0, 0, 6})) s.SetName("span kind server") - s.SetKind(pdata.SpanKindServer) + s.SetKind(ptrace.SpanKindServer) return s }, want: telemetry.Span{ @@ -385,15 +388,15 @@ func TestTransformSpan(t *testing.T) { }, { name: "with events", - spanFunc: func() pdata.Span { - s := pdata.NewSpan() - s.SetTraceID(pdata.NewTraceID([...]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) - s.SetSpanID(pdata.NewSpanID([...]byte{0, 0, 0, 0, 0, 0, 0, 7})) + spanFunc: func() ptrace.Span { + s := ptrace.NewSpan() + s.SetTraceID(pcommon.NewTraceID([...]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) + s.SetSpanID(pcommon.NewSpanID([...]byte{0, 0, 0, 0, 0, 0, 0, 7})) s.SetName("with events") event := s.Events().AppendEmpty() event.SetName("this is the event name") - event.SetTimestamp(pdata.NewTimestampFromTime(now)) + event.SetTimestamp(pcommon.NewTimestampFromTime(now)) return s }, want: telemetry.Span{ @@ -413,10 +416,10 @@ func TestTransformSpan(t *testing.T) { }, { name: "with dropped attributes", - spanFunc: func() pdata.Span { - s := pdata.NewSpan() - s.SetTraceID(pdata.NewTraceID([...]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) - s.SetSpanID(pdata.NewSpanID([...]byte{0, 0, 0, 0, 0, 0, 0, 8})) + spanFunc: func() ptrace.Span { + s := ptrace.NewSpan() + s.SetTraceID(pcommon.NewTraceID([...]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) + s.SetSpanID(pcommon.NewSpanID([...]byte{0, 0, 0, 0, 0, 0, 0, 8})) s.SetName("with dropped attributes") s.SetDroppedAttributesCount(2) return s @@ -434,10 +437,10 @@ func TestTransformSpan(t *testing.T) { }, { name: "with dropped events", - spanFunc: func() pdata.Span { - s := pdata.NewSpan() - s.SetTraceID(pdata.NewTraceID([...]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) - s.SetSpanID(pdata.NewSpanID([...]byte{0, 0, 0, 0, 0, 0, 0, 9})) + spanFunc: func() ptrace.Span { + s := ptrace.NewSpan() + s.SetTraceID(pcommon.NewTraceID([...]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) + s.SetSpanID(pcommon.NewSpanID([...]byte{0, 0, 0, 0, 0, 0, 0, 9})) 
s.SetName("with dropped events") s.SetDroppedEventsCount(3) return s @@ -455,17 +458,17 @@ func TestTransformSpan(t *testing.T) { }, { name: "with dropped attributes on events", - spanFunc: func() pdata.Span { - s := pdata.NewSpan() - s.SetTraceID(pdata.NewTraceID([...]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) - s.SetSpanID(pdata.NewSpanID([...]byte{0, 0, 0, 0, 0, 0, 0, 10})) + spanFunc: func() ptrace.Span { + s := ptrace.NewSpan() + s.SetTraceID(pcommon.NewTraceID([...]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) + s.SetSpanID(pcommon.NewSpanID([...]byte{0, 0, 0, 0, 0, 0, 0, 10})) s.SetName("with dropped attributes on events") - ev := pdata.NewSpanEventSlice() + ev := ptrace.NewSpanEventSlice() ev.EnsureCapacity(1) event := ev.AppendEmpty() event.SetName("this is the event name") - event.SetTimestamp(pdata.NewTimestampFromTime(now)) + event.SetTimestamp(pcommon.NewTimestampFromTime(now)) event.SetDroppedAttributesCount(1) tgt := s.Events().AppendEmpty() event.CopyTo(tgt) @@ -503,14 +506,14 @@ func TestTransformSpan(t *testing.T) { } } -func testTransformMetric(t *testing.T, metric pdata.Metric, want []telemetry.Metric) { +func testTransformMetric(t *testing.T, metric pmetric.Metric, want []telemetry.Metric) { comparer := func(t *testing.T, want []telemetry.Metric, got []telemetry.Metric) { assert.Equal(t, want, got) } testTransformMetricWithComparer(t, metric, want, comparer) } -func testTransformMetricWithComparer(t *testing.T, metric pdata.Metric, want []telemetry.Metric, compare func(t *testing.T, want []telemetry.Metric, got []telemetry.Metric)) { +func testTransformMetricWithComparer(t *testing.T, metric pmetric.Metric, want []telemetry.Metric, compare func(t *testing.T, want []telemetry.Metric, got []telemetry.Metric)) { details := newMetricMetadata(context.Background()) transform := newTransformer(zap.NewNop(), &component.BuildInfo{ Command: testCollectorName, @@ -527,7 +530,7 @@ func testTransformMetricWithComparer(t *testing.T, metric pdata.Metric, want []t } } -func testTransformMetricWithError(t *testing.T, metric pdata.Metric, expectedErrorType interface{}) { +func testTransformMetricWithError(t *testing.T, metric pmetric.Metric, expectedErrorType interface{}) { details := newMetricMetadata(context.Background()) transform := newTransformer(zap.NewNop(), &component.BuildInfo{ Command: testCollectorName, @@ -544,7 +547,7 @@ func testTransformMetricWithError(t *testing.T, metric pdata.Metric, expectedErr } func TestTransformGauge(t *testing.T) { - ts := pdata.NewTimestampFromTime(time.Unix(1, 0)) + ts := pcommon.NewTimestampFromTime(time.Unix(1, 0)) expected := []telemetry.Metric{ telemetry.Gauge{ Name: "gauge", @@ -557,11 +560,11 @@ func TestTransformGauge(t *testing.T) { }, } { - m := pdata.NewMetric() + m := pmetric.NewMetric() m.SetName("gauge") m.SetDescription("description") m.SetUnit("1") - m.SetDataType(pdata.MetricDataTypeGauge) + m.SetDataType(pmetric.MetricDataTypeGauge) gd := m.Gauge() dp := gd.DataPoints().AppendEmpty() dp.SetTimestamp(ts) @@ -569,11 +572,11 @@ func TestTransformGauge(t *testing.T) { t.Run("Double", func(t *testing.T) { testTransformMetric(t, m, expected) }) } { - m := pdata.NewMetric() + m := pmetric.NewMetric() m.SetName("gauge") m.SetDescription("description") m.SetUnit("1") - m.SetDataType(pdata.MetricDataTypeGauge) + m.SetDataType(pmetric.MetricDataTypeGauge) gi := m.Gauge() dp := gi.DataPoints().AppendEmpty() dp.SetTimestamp(ts) @@ -583,8 +586,8 @@ func TestTransformGauge(t *testing.T) { } func TestTransformSum(t 
*testing.T) { - start := pdata.NewTimestampFromTime(time.Unix(1, 0)) - end := pdata.NewTimestampFromTime(time.Unix(3, 0)) + start := pcommon.NewTimestampFromTime(time.Unix(1, 0)) + end := pcommon.NewTimestampFromTime(time.Unix(3, 0)) expected := []telemetry.Metric{ telemetry.Count{ @@ -612,13 +615,13 @@ func TestTransformSum(t *testing.T) { } { - m := pdata.NewMetric() + m := pmetric.NewMetric() m.SetName("sum") m.SetDescription("description") m.SetUnit("1") - m.SetDataType(pdata.MetricDataTypeSum) + m.SetDataType(pmetric.MetricDataTypeSum) d := m.Sum() - d.SetAggregationTemporality(pdata.MetricAggregationTemporalityDelta) + d.SetAggregationTemporality(pmetric.MetricAggregationTemporalityDelta) dp := d.DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(end) @@ -626,13 +629,13 @@ func TestTransformSum(t *testing.T) { t.Run("Sum-Delta", func(t *testing.T) { testTransformMetric(t, m, expected) }) } { - m := pdata.NewMetric() + m := pmetric.NewMetric() m.SetName("sum") m.SetDescription("description") m.SetUnit("1") - m.SetDataType(pdata.MetricDataTypeSum) + m.SetDataType(pmetric.MetricDataTypeSum) d := m.Sum() - d.SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + d.SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) dp := d.DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(end) @@ -640,13 +643,13 @@ func TestTransformSum(t *testing.T) { t.Run("Sum-Cumulative", func(t *testing.T) { testTransformMetric(t, m, expectedGauge) }) } { - m := pdata.NewMetric() + m := pmetric.NewMetric() m.SetName("sum") m.SetDescription("description") m.SetUnit("1") - m.SetDataType(pdata.MetricDataTypeSum) + m.SetDataType(pmetric.MetricDataTypeSum) d := m.Sum() - d.SetAggregationTemporality(pdata.MetricAggregationTemporalityDelta) + d.SetAggregationTemporality(pmetric.MetricAggregationTemporalityDelta) dp := d.DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(end) @@ -654,13 +657,13 @@ func TestTransformSum(t *testing.T) { t.Run("IntSum-Delta", func(t *testing.T) { testTransformMetric(t, m, expected) }) } { - m := pdata.NewMetric() + m := pmetric.NewMetric() m.SetName("sum") m.SetDescription("description") m.SetUnit("1") - m.SetDataType(pdata.MetricDataTypeSum) + m.SetDataType(pmetric.MetricDataTypeSum) d := m.Sum() - d.SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + d.SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) dp := d.DataPoints().AppendEmpty() dp.SetStartTimestamp(start) dp.SetTimestamp(end) @@ -677,8 +680,8 @@ func TestTransformDeltaSummary(t *testing.T) { } func testTransformDeltaSummaryWithValues(t *testing.T, testName string, count uint64, sum float64, min float64, max float64) { - start := pdata.NewTimestampFromTime(time.Unix(1, 0)) - end := pdata.NewTimestampFromTime(time.Unix(3, 0)) + start := pcommon.NewTimestampFromTime(time.Unix(1, 0)) + end := pcommon.NewTimestampFromTime(time.Unix(3, 0)) expected := []telemetry.Metric{ telemetry.Summary{ @@ -724,11 +727,11 @@ func testTransformDeltaSummaryWithValues(t *testing.T, testName string, count ui } } - m := pdata.NewMetric() + m := pmetric.NewMetric() m.SetName("summary") m.SetDescription("description") m.SetUnit("s") - m.SetDataType(pdata.MetricDataTypeSummary) + m.SetDataType(pmetric.MetricDataTypeSummary) ds := m.Summary() dp := ds.DataPoints().AppendEmpty() dp.SetStartTimestamp(start) @@ -752,14 +755,14 @@ func testTransformDeltaSummaryWithValues(t *testing.T, testName string, count ui } func 
TestUnsupportedMetricTypes(t *testing.T) { - start := pdata.NewTimestampFromTime(time.Unix(1, 0)) - end := pdata.NewTimestampFromTime(time.Unix(3, 0)) + start := pcommon.NewTimestampFromTime(time.Unix(1, 0)) + end := pcommon.NewTimestampFromTime(time.Unix(3, 0)) { - m := pdata.NewMetric() + m := pmetric.NewMetric() m.SetName("no") m.SetDescription("no") m.SetUnit("1") - m.SetDataType(pdata.MetricDataTypeHistogram) + m.SetDataType(pmetric.MetricDataTypeHistogram) h := m.Histogram() dp := h.DataPoints().AppendEmpty() dp.SetStartTimestamp(start) @@ -768,7 +771,7 @@ func TestUnsupportedMetricTypes(t *testing.T) { dp.SetSum(8.0) dp.SetExplicitBounds([]float64{3, 7, 11}) dp.SetBucketCounts([]uint64{1, 1, 0, 0}) - h.SetAggregationTemporality(pdata.MetricAggregationTemporalityDelta) + h.SetAggregationTemporality(pmetric.MetricAggregationTemporalityDelta) t.Run("DoubleHistogram", func(t *testing.T) { testTransformMetricWithError(t, m, consumererror.NewPermanent(&errUnsupportedMetricType{})) @@ -777,7 +780,7 @@ func TestUnsupportedMetricTypes(t *testing.T) { } func TestTransformUnknownMetricType(t *testing.T) { - metric := pdata.NewMetric() + metric := pmetric.NewMetric() details := newMetricMetadata(context.Background()) transform := newTransformer(zap.NewNop(), &component.BuildInfo{ Command: testCollectorName, @@ -788,20 +791,20 @@ func TestTransformUnknownMetricType(t *testing.T) { require.NoError(t, err) assert.Nil(t, got) - assert.Equal(t, 1, details.metricMetadataCount[metricStatsKey{MetricType: pdata.MetricDataTypeNone}]) + assert.Equal(t, 1, details.metricMetadataCount[metricStatsKey{MetricType: pmetric.MetricDataTypeNone}]) } func TestTransformer_Log(t *testing.T) { tests := []struct { name string - logFunc func() pdata.LogRecord + logFunc func() plog.LogRecord want telemetry.Log }{ { name: "Basic Conversion", - logFunc: func() pdata.LogRecord { - log := pdata.NewLogRecord() - timestamp := pdata.NewTimestampFromTime(time.Unix(0, 0).UTC()) + logFunc: func() plog.LogRecord { + log := plog.NewLogRecord() + timestamp := pcommon.NewTimestampFromTime(time.Unix(0, 0).UTC()) log.SetTimestamp(timestamp) return log }, @@ -813,8 +816,8 @@ func TestTransformer_Log(t *testing.T) { }, { name: "With Log attributes", - logFunc: func() pdata.LogRecord { - log := pdata.NewLogRecord() + logFunc: func() plog.LogRecord { + log := plog.NewLogRecord() log.Attributes().InsertString("foo", "bar") log.Body().SetStringVal("Hello World") return log @@ -827,9 +830,9 @@ func TestTransformer_Log(t *testing.T) { }, { name: "With severity number", - logFunc: func() pdata.LogRecord { - log := pdata.NewLogRecord() - log.SetSeverityNumber(pdata.SeverityNumberWARN) + logFunc: func() plog.LogRecord { + log := plog.NewLogRecord() + log.SetSeverityNumber(plog.SeverityNumberWARN) log.Body().SetStringVal("bloopbleep") return log }, @@ -841,8 +844,8 @@ func TestTransformer_Log(t *testing.T) { }, { name: "With severity text", - logFunc: func() pdata.LogRecord { - log := pdata.NewLogRecord() + logFunc: func() plog.LogRecord { + log := plog.NewLogRecord() log.SetSeverityText("SEVERE") log.Body().SetStringVal("bloopbleep") return log @@ -855,11 +858,11 @@ func TestTransformer_Log(t *testing.T) { }, { name: "With traceID and spanID", - logFunc: func() pdata.LogRecord { - log := pdata.NewLogRecord() - timestamp := pdata.NewTimestampFromTime(time.Unix(0, 0).UTC()) - log.SetTraceID(pdata.NewTraceID([...]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) - log.SetSpanID(pdata.NewSpanID([...]byte{0, 0, 0, 0, 0, 0, 0, 1})) + logFunc: 
func() plog.LogRecord { + log := plog.NewLogRecord() + timestamp := pcommon.NewTimestampFromTime(time.Unix(0, 0).UTC()) + log.SetTraceID(pcommon.NewTraceID([...]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) + log.SetSpanID(pcommon.NewSpanID([...]byte{0, 0, 0, 0, 0, 0, 0, 1})) log.SetTimestamp(timestamp) return log }, @@ -874,9 +877,9 @@ func TestTransformer_Log(t *testing.T) { }, { name: "With dropped attribute count", - logFunc: func() pdata.LogRecord { - log := pdata.NewLogRecord() - timestamp := pdata.NewTimestampFromTime(time.Unix(0, 0).UTC()) + logFunc: func() plog.LogRecord { + log := plog.NewLogRecord() + timestamp := pcommon.NewTimestampFromTime(time.Unix(0, 0).UTC()) log.SetTimestamp(timestamp) log.SetDroppedAttributesCount(4) return log @@ -901,7 +904,7 @@ func TestTransformer_Log(t *testing.T) { } func TestCaptureLogAttributeMetadata(t *testing.T) { - log := pdata.NewLogRecord() + log := plog.NewLogRecord() log.Attributes().InsertString("foo", "bar") log.Body().SetStringVal("Hello World") @@ -911,11 +914,11 @@ func TestCaptureLogAttributeMetadata(t *testing.T) { require.NoError(t, err) assert.Equal(t, 1, len(details.attributeMetadataCount)) - assert.Equal(t, 1, details.attributeMetadataCount[attributeStatsKey{location: attributeLocationLog, attributeType: pdata.ValueTypeString}]) + assert.Equal(t, 1, details.attributeMetadataCount[attributeStatsKey{location: attributeLocationLog, attributeType: pcommon.ValueTypeString}]) } func TestDoesNotCaptureLogAttributeMetadata(t *testing.T) { - log := pdata.NewLogRecord() + log := plog.NewLogRecord() log.Body().SetStringVal("Hello World") details := newLogMetadata(context.TODO()) diff --git a/exporter/observiqexporter/client.go b/exporter/observiqexporter/client.go index 0570be19bfef..663edd79d313 100644 --- a/exporter/observiqexporter/client.go +++ b/exporter/observiqexporter/client.go @@ -29,7 +29,7 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer/consumererror" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/plog" "go.uber.org/multierr" "go.uber.org/zap" ) @@ -50,7 +50,7 @@ type client struct { func (c *client) sendLogs( ctx context.Context, - ld pdata.Logs, + ld plog.Logs, ) error { c.wg.Add(1) defer c.wg.Done() diff --git a/exporter/observiqexporter/client_test.go b/exporter/observiqexporter/client_test.go index 3b7becce3eb1..67c2ec0d54fb 100644 --- a/exporter/observiqexporter/client_test.go +++ b/exporter/observiqexporter/client_test.go @@ -29,8 +29,9 @@ import ( "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer/consumererror" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" "go.uber.org/zap" ) @@ -83,8 +84,8 @@ func newTestClient(config *Config, httpClient *http.Client) *client { } } -func createLogData() pdata.Logs { - logs := pdata.NewLogs() +func createLogData() plog.Logs { + logs := plog.NewLogs() logs.ResourceLogs().EnsureCapacity(1) now := timeNow() @@ -97,7 +98,7 @@ func createLogData() pdata.Logs { logRecord := sl.LogRecords().AppendEmpty() - logRecord.SetTimestamp(pdata.Timestamp(now.UnixNano())) + logRecord.SetTimestamp(pcommon.Timestamp(now.UnixNano())) logRecord.Body().SetStringVal("message") logRecord.Attributes().InsertString(conventions.AttributeNetHostIP, "1.1.1.1") 
logRecord.Attributes().InsertInt(conventions.AttributeNetHostPort, 4000) @@ -134,7 +135,7 @@ func verifyFirstElementIsEntryFunc(e observIQLogEntry) requestVerificationFunc { func TestClientSendLogs(t *testing.T) { type testCaseRequest struct { // Inputs - logs pdata.Logs + logs plog.Logs responseStatus int respBody string timeoutTimer bool // Timeout the last set timer created through timeAfterFunc() diff --git a/exporter/observiqexporter/converter.go b/exporter/observiqexporter/converter.go index fc8a491589ca..ab734bfa89e6 100644 --- a/exporter/observiqexporter/converter.go +++ b/exporter/observiqexporter/converter.go @@ -21,7 +21,8 @@ import ( "strings" "go.opentelemetry.io/collector/consumer/consumererror" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" ) // Type preEncodedJSON aliases []byte, represents JSON that has already been encoded @@ -58,8 +59,8 @@ type observIQLogEntry struct { var fnvHash = fnv.New128a() var fnvHashOut = make([]byte, 0, 16) -// Convert pdata.Logs to observIQLogBatch -func logdataToObservIQFormat(ld pdata.Logs, agentID string, agentName string, buildVersion string) (*observIQLogBatch, []error) { +// Convert plog.Logs to observIQLogBatch +func logdataToObservIQFormat(ld plog.Logs, agentID string, agentName string, buildVersion string) (*observIQLogBatch, []error) { var rls = ld.ResourceLogs() var sliceOut = make([]*observIQLog, 0, ld.LogRecordCount()) var errorsOut = make([]error, 0) @@ -111,7 +112,7 @@ func logdataToObservIQFormat(ld pdata.Logs, agentID string, agentName string, bu // Output timestamp format, an ISO8601 compliant timestamp with millisecond precision const timestampFieldOutputLayout = "2006-01-02T15:04:05.000Z07:00" -func resourceAndInstrumentationLogToEntry(resMap map[string]interface{}, log pdata.LogRecord, agentID string, agentName string, buildVersion string) *observIQLogEntry { +func resourceAndInstrumentationLogToEntry(resMap map[string]interface{}, log plog.LogRecord, agentID string, agentName string, buildVersion string) *observIQLogEntry { return &observIQLogEntry{ Timestamp: timestampFromRecord(log), Severity: severityFromRecord(log), @@ -123,15 +124,15 @@ func resourceAndInstrumentationLogToEntry(resMap map[string]interface{}, log pda } } -func timestampFromRecord(log pdata.LogRecord) string { +func timestampFromRecord(log plog.LogRecord) string { if log.Timestamp() == 0 { return timeNow().UTC().Format(timestampFieldOutputLayout) } return log.Timestamp().AsTime().UTC().Format(timestampFieldOutputLayout) } -func messageFromRecord(log pdata.LogRecord) string { - if log.Body().Type() == pdata.ValueTypeString { +func messageFromRecord(log plog.LogRecord) string { + if log.Body().Type() == pcommon.ValueTypeString { return log.Body().StringVal() } @@ -139,8 +140,8 @@ func messageFromRecord(log pdata.LogRecord) string { } // bodyFromRecord returns what the "body" field should be on the observiq entry from the given LogRecord. -func bodyFromRecord(log pdata.LogRecord) interface{} { - if log.Body().Type() != pdata.ValueTypeString { +func bodyFromRecord(log plog.LogRecord) interface{} { + if log.Body().Type() != pcommon.ValueTypeString { return attributeValueToBaseType(log.Body()) } return nil @@ -181,7 +182,7 @@ var severityNumberToObservIQName = map[int32]string{ representing the opentelemetry defined severity. 
If there is no severity number, we use "default" */ -func severityFromRecord(log pdata.LogRecord) string { +func severityFromRecord(log plog.LogRecord) string { var sevAsInt32 = int32(log.SeverityNumber()) if sevAsInt32 < int32(len(severityNumberToObservIQName)) && sevAsInt32 >= 0 { return severityNumberToObservIQName[sevAsInt32] @@ -192,9 +193,9 @@ func severityFromRecord(log pdata.LogRecord) string { /* Transform AttributeMap to native Go map, skipping keys with nil values, and replacing dots in keys with _ */ -func attributeMapToBaseType(m pdata.Map) map[string]interface{} { +func attributeMapToBaseType(m pcommon.Map) map[string]interface{} { mapOut := make(map[string]interface{}, m.Len()) - m.Range(func(k string, v pdata.Value) bool { + m.Range(func(k string, v pcommon.Value) bool { val := attributeValueToBaseType(v) if val != nil { dedotedKey := strings.ReplaceAll(k, ".", "_") @@ -208,20 +209,20 @@ func attributeMapToBaseType(m pdata.Map) map[string]interface{} { /* attrib is the attribute value to convert to it's native Go type - skips nils in arrays/maps */ -func attributeValueToBaseType(attrib pdata.Value) interface{} { +func attributeValueToBaseType(attrib pcommon.Value) interface{} { switch attrib.Type() { - case pdata.ValueTypeString: + case pcommon.ValueTypeString: return attrib.StringVal() - case pdata.ValueTypeBool: + case pcommon.ValueTypeBool: return attrib.BoolVal() - case pdata.ValueTypeInt: + case pcommon.ValueTypeInt: return attrib.IntVal() - case pdata.ValueTypeDouble: + case pcommon.ValueTypeDouble: return attrib.DoubleVal() - case pdata.ValueTypeMap: + case pcommon.ValueTypeMap: attribMap := attrib.MapVal() return attributeMapToBaseType(attribMap) - case pdata.ValueTypeSlice: + case pcommon.ValueTypeSlice: arrayVal := attrib.SliceVal() slice := make([]interface{}, 0, arrayVal.Len()) for i := 0; i < arrayVal.Len(); i++ { @@ -231,7 +232,7 @@ func attributeValueToBaseType(attrib pdata.Value) interface{} { } } return slice - case pdata.ValueTypeEmpty: + case pcommon.ValueTypeEmpty: return nil } return nil diff --git a/exporter/observiqexporter/converter_test.go b/exporter/observiqexporter/converter_test.go index 1d07a3b06c65..2b3810ce612d 100644 --- a/exporter/observiqexporter/converter_test.go +++ b/exporter/observiqexporter/converter_test.go @@ -22,18 +22,19 @@ import ( "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" ) -func resourceAndLogRecordsToLogs(r pdata.Resource, lrs []pdata.LogRecord) pdata.Logs { - logs := pdata.NewLogs() +func resourceAndLogRecordsToLogs(r pcommon.Resource, lrs []plog.LogRecord) plog.Logs { + logs := plog.NewLogs() resLogs := logs.ResourceLogs() resLog := resLogs.AppendEmpty() resLogRes := resLog.Resource() - r.Attributes().Range(func(k string, v pdata.Value) bool { + r.Attributes().Range(func(k string, v pcommon.Value) bool { resLogRes.Attributes().Insert(k, v) return true }) @@ -50,15 +51,15 @@ func resourceAndLogRecordsToLogs(r pdata.Resource, lrs []pdata.LogRecord) pdata. 
func TestLogdataToObservIQFormat(t *testing.T) { ts := time.Date(2021, 12, 11, 10, 9, 8, 1, time.UTC) stringTs := "2021-12-11T10:09:08.000Z" - nanoTs := pdata.Timestamp(ts.UnixNano()) + nanoTs := pcommon.Timestamp(ts.UnixNano()) timeNow = func() time.Time { return ts } testCases := []struct { name string - logRecordFn func() pdata.LogRecord - logResourceFn func() pdata.Resource + logRecordFn func() plog.LogRecord + logResourceFn func() pcommon.Resource agentName string agentID string output observIQLogEntry @@ -66,8 +67,8 @@ func TestLogdataToObservIQFormat(t *testing.T) { }{ { "Happy path with string attributes", - func() pdata.LogRecord { - logRecord := pdata.NewLogRecord() + func() plog.LogRecord { + logRecord := plog.NewLogRecord() logRecord.Body().SetStringVal("Message") logRecord.Attributes().InsertString(conventions.AttributeServiceName, "myapp") logRecord.Attributes().InsertString(conventions.AttributeHostName, "myhost") @@ -75,7 +76,7 @@ func TestLogdataToObservIQFormat(t *testing.T) { logRecord.SetTimestamp(nanoTs) return logRecord }, - pdata.NewResource, + pcommon.NewResource, "agent", "agentID", observIQLogEntry{ @@ -93,8 +94,8 @@ func TestLogdataToObservIQFormat(t *testing.T) { }, { "works with attributes of all types", - func() pdata.LogRecord { - logRecord := pdata.NewLogRecord() + func() plog.LogRecord { + logRecord := plog.NewLogRecord() logRecord.Body().SetStringVal("Message") logRecord.Attributes().InsertString(conventions.AttributeServiceName, "myapp") logRecord.Attributes().InsertBool("bool", true) @@ -102,11 +103,11 @@ func TestLogdataToObservIQFormat(t *testing.T) { logRecord.Attributes().InsertInt("int", 3) logRecord.Attributes().InsertNull("null") - mapVal := pdata.NewValueMap() - mapVal.MapVal().Insert("mapKey", pdata.NewValueString("value")) + mapVal := pcommon.NewValueMap() + mapVal.MapVal().Insert("mapKey", pcommon.NewValueString("value")) logRecord.Attributes().Insert("map", mapVal) - arrVal := pdata.NewValueSlice() + arrVal := pcommon.NewValueSlice() arrVal.SliceVal().EnsureCapacity(2) arrVal.SliceVal().AppendEmpty().SetIntVal(1) arrVal.SliceVal().AppendEmpty().SetIntVal(2) @@ -115,20 +116,20 @@ func TestLogdataToObservIQFormat(t *testing.T) { logRecord.SetTimestamp(nanoTs) return logRecord }, - func() pdata.Resource { - res := pdata.NewResource() + func() pcommon.Resource { + res := pcommon.NewResource() res.Attributes().InsertBool("bool", true) res.Attributes().InsertString("string", "string") res.Attributes().InsertInt("int", 1) res.Attributes().InsertNull("null") - mapVal := pdata.NewValueMap() + mapVal := pcommon.NewValueMap() mapVal.MapVal().InsertDouble("double", 1.1) mapVal.MapVal().InsertBool("bool", false) mapVal.MapVal().InsertNull("null") res.Attributes().Insert("map", mapVal) - arrVal := pdata.NewValueSlice() + arrVal := pcommon.NewValueSlice() arrVal.SliceVal().EnsureCapacity(2) arrVal.SliceVal().AppendEmpty().SetIntVal(1) arrVal.SliceVal().AppendEmpty().SetDoubleVal(2.0) @@ -171,12 +172,12 @@ func TestLogdataToObservIQFormat(t *testing.T) { }, { "Body is nil", - func() pdata.LogRecord { - logRecord := pdata.NewLogRecord() + func() plog.LogRecord { + logRecord := plog.NewLogRecord() logRecord.SetTimestamp(nanoTs) return logRecord }, - pdata.NewResource, + pcommon.NewResource, "agent", "agentID", observIQLogEntry{ @@ -190,17 +191,17 @@ func TestLogdataToObservIQFormat(t *testing.T) { }, { "Body is map", - func() pdata.LogRecord { - logRecord := pdata.NewLogRecord() + func() plog.LogRecord { + logRecord := plog.NewLogRecord() - mapVal := 
pdata.NewValueMap() - mapVal.MapVal().Insert("mapKey", pdata.NewValueString("value")) + mapVal := pcommon.NewValueMap() + mapVal.MapVal().Insert("mapKey", pcommon.NewValueString("value")) mapVal.CopyTo(logRecord.Body()) logRecord.SetTimestamp(nanoTs) return logRecord }, - pdata.NewResource, + pcommon.NewResource, "agent", "agentID", observIQLogEntry{ @@ -216,10 +217,10 @@ func TestLogdataToObservIQFormat(t *testing.T) { }, { "Body is array", - func() pdata.LogRecord { - logRecord := pdata.NewLogRecord() + func() plog.LogRecord { + logRecord := plog.NewLogRecord() - pdata.NewValueSlice().CopyTo(logRecord.Body()) + pcommon.NewValueSlice().CopyTo(logRecord.Body()) logRecord.Body().SliceVal().EnsureCapacity(2) logRecord.Body().SliceVal().AppendEmpty().SetStringVal("string") logRecord.Body().SliceVal().AppendEmpty().SetDoubleVal(1.0) @@ -227,7 +228,7 @@ func TestLogdataToObservIQFormat(t *testing.T) { logRecord.SetTimestamp(nanoTs) return logRecord }, - pdata.NewResource, + pcommon.NewResource, "agent", "agentID", observIQLogEntry{ @@ -244,15 +245,15 @@ func TestLogdataToObservIQFormat(t *testing.T) { }, { "Body is an int", - func() pdata.LogRecord { - logRecord := pdata.NewLogRecord() + func() plog.LogRecord { + logRecord := plog.NewLogRecord() logRecord.Body().SetIntVal(1) logRecord.SetTimestamp(nanoTs) return logRecord }, - pdata.NewResource, + pcommon.NewResource, "agent", "agentID", observIQLogEntry{ @@ -266,11 +267,11 @@ func TestLogdataToObservIQFormat(t *testing.T) { }, { "Body and attributes are maps", - func() pdata.LogRecord { - logRecord := pdata.NewLogRecord() + func() plog.LogRecord { + logRecord := plog.NewLogRecord() - bodyMapVal := pdata.NewValueMap() - bodyMapVal.MapVal().Insert("mapKey", pdata.NewValueString("body")) + bodyMapVal := pcommon.NewValueMap() + bodyMapVal.MapVal().Insert("mapKey", pcommon.NewValueString("body")) bodyMapVal.CopyTo(logRecord.Body()) logRecord.Attributes().InsertString("attrib", "logAttrib") @@ -278,7 +279,7 @@ func TestLogdataToObservIQFormat(t *testing.T) { logRecord.SetTimestamp(nanoTs) return logRecord }, - pdata.NewResource, + pcommon.NewResource, "agent", "agentID", observIQLogEntry{ @@ -296,12 +297,12 @@ func TestLogdataToObservIQFormat(t *testing.T) { }, { "No timestamp on record", - func() pdata.LogRecord { - logRecord := pdata.NewLogRecord() + func() plog.LogRecord { + logRecord := plog.NewLogRecord() logRecord.Body().SetStringVal("Message") return logRecord }, - pdata.NewResource, + pcommon.NewResource, "agent", "agentID", observIQLogEntry{ @@ -317,7 +318,7 @@ func TestLogdataToObservIQFormat(t *testing.T) { for _, testCase := range testCases { t.Run(testCase.name, func(t *testing.T) { - logs := resourceAndLogRecordsToLogs(testCase.logResourceFn(), []pdata.LogRecord{testCase.logRecordFn()}) + logs := resourceAndLogRecordsToLogs(testCase.logResourceFn(), []plog.LogRecord{testCase.logRecordFn()}) res, errs := logdataToObservIQFormat( logs, testCase.agentID, diff --git a/exporter/observiqexporter/go.mod b/exporter/observiqexporter/go.mod index 34cd17e20109..c310760145a8 100644 --- a/exporter/observiqexporter/go.mod +++ b/exporter/observiqexporter/go.mod @@ -5,34 +5,29 @@ go 1.17 require ( github.com/google/uuid v1.3.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata 
v0.0.0-00010101000000-000000000000 go.uber.org/multierr v1.8.0 go.uber.org/zap v1.21.0 ) require ( - github.com/cenkalti/backoff/v4 v4.1.2 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/protobuf v1.5.2 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect - golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect - golang.org/x/text v0.3.7 // indirect - google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect - google.golang.org/grpc v1.45.0 // indirect - google.golang.org/protobuf v1.28.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/exporter/observiqexporter/go.sum b/exporter/observiqexporter/go.sum index c2d9d2f9529b..56e28a233646 100644 --- a/exporter/observiqexporter/go.sum +++ b/exporter/observiqexporter/go.sum @@ -1,8 +1,5 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= @@ -18,35 +15,23 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= -github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod 
h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/felixge/httpsnoop v1.0.2 h1:+nS9g82KMXccJ/wp0zyRW9ZBHFETmMGtkk+2CTTrW4o= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= @@ -63,18 +48,14 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 
-github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -84,15 +65,12 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -122,8 +100,8 @@ github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqx github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= 
-github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -160,19 +138,14 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rhnvrm/simples3 v0.6.1/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= @@ -182,20 +155,21 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model 
v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -219,20 +193,16 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= golang.org/x/oauth2 
v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -248,21 +218,17 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -283,22 +249,16 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= 
google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -308,18 +268,12 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= diff --git a/exporter/opencensusexporter/go.mod b/exporter/opencensusexporter/go.mod index 39e7e53a2304..134c4d407395 100644 --- a/exporter/opencensusexporter/go.mod +++ 
b/exporter/opencensusexporter/go.mod @@ -9,13 +9,13 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus v0.48.0 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver v0.48.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 google.golang.org/grpc v1.45.0 ) require ( - github.com/cenkalti/backoff/v4 v4.1.2 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/go-logr/logr v1.2.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect @@ -25,7 +25,7 @@ require ( github.com/golang/snappy v0.0.4 // indirect github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect github.com/klauspost/compress v1.15.1 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect @@ -34,8 +34,8 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rs/cors v1.8.2 // indirect github.com/soheilhy/cmux v0.1.5 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opencensus.io v0.23.0 // indirect + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect @@ -43,7 +43,7 @@ require ( go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect go.uber.org/zap v1.21.0 // indirect - golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect golang.org/x/sys v0.0.0-20220209214540-3681064d5158 // indirect golang.org/x/text v0.3.7 // indirect google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf // indirect @@ -67,3 +67,5 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/share replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus => ../../pkg/translator/opencensus replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/opencensusreceiver => ../../receiver/opencensusreceiver + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/exporter/opencensusexporter/go.sum b/exporter/opencensusexporter/go.sum index 6faaf8250093..e44726406e15 100644 --- a/exporter/opencensusexporter/go.sum +++ b/exporter/opencensusexporter/go.sum @@ -21,8 +21,8 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= 
-github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.3.0 h1:t/LhUZLVitR1Ow2YOnduCsavhwFUklBMoGVYUCqmCqk= github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -106,7 +106,6 @@ github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -141,8 +140,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/klauspost/compress v1.14.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= @@ -199,8 +198,6 @@ github.com/schollz/progressbar/v2 v2.13.2/go.mod h1:6YZjqdthH6SCZKv2rqGryrxPtfmR github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -216,10 +213,12 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod 
h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0/go.mod h1:SY9qHHUES6W3oZnO1H2W8NvsSovIoXRg/A1AH9px8+I= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= @@ -273,8 +272,8 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= diff --git a/exporter/opencensusexporter/opencensus.go b/exporter/opencensusexporter/opencensus.go index 5a33250777d2..01f5393d2b6b 100644 --- a/exporter/opencensusexporter/opencensus.go +++ b/exporter/opencensusexporter/opencensus.go @@ -24,7 +24,8 @@ import ( agenttracepb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1" resourcepb "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" "google.golang.org/grpc" 
"google.golang.org/grpc/metadata" @@ -150,7 +151,7 @@ func newMetricsExporter(ctx context.Context, cfg *Config, settings component.Tel return oce, nil } -func (oce *ocExporter) pushTraces(_ context.Context, td pdata.Traces) error { +func (oce *ocExporter) pushTraces(_ context.Context, td ptrace.Traces) error { // Get first available trace Client. tClient, ok := <-oce.tracesClients if !ok { @@ -199,7 +200,7 @@ func (oce *ocExporter) pushTraces(_ context.Context, td pdata.Traces) error { return nil } -func (oce *ocExporter) pushMetrics(_ context.Context, md pdata.Metrics) error { +func (oce *ocExporter) pushMetrics(_ context.Context, md pmetric.Metrics) error { // Get first available mClient. mClient, ok := <-oce.metricsClients if !ok { diff --git a/exporter/parquetexporter/exporter.go b/exporter/parquetexporter/exporter.go index d80b2830534f..b4eac8c88225 100644 --- a/exporter/parquetexporter/exporter.go +++ b/exporter/parquetexporter/exporter.go @@ -18,7 +18,9 @@ import ( "context" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" ) type parquetExporter struct { @@ -33,14 +35,14 @@ func (e parquetExporter) shutdown(ctx context.Context) error { return nil } -func (e parquetExporter) consumeMetrics(ctx context.Context, ld pdata.Metrics) error { +func (e parquetExporter) consumeMetrics(ctx context.Context, ld pmetric.Metrics) error { return nil } -func (e parquetExporter) consumeTraces(ctx context.Context, ld pdata.Traces) error { +func (e parquetExporter) consumeTraces(ctx context.Context, ld ptrace.Traces) error { return nil } -func (e parquetExporter) consumeLogs(ctx context.Context, ld pdata.Logs) error { +func (e parquetExporter) consumeLogs(ctx context.Context, ld plog.Logs) error { return nil } diff --git a/exporter/parquetexporter/go.mod b/exporter/parquetexporter/go.mod index b86899c0520a..7aeb42f356e7 100644 --- a/exporter/parquetexporter/go.mod +++ b/exporter/parquetexporter/go.mod @@ -3,22 +3,20 @@ module github.com/open-telemetry/opentelemetry-collector-contrib/exporter/parque go 1.17 require ( - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 ) require ( - github.com/cenkalti/backoff/v4 v4.1.2 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/fsnotify/fsnotify v1.5.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/protobuf v1.5.2 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/pelletier/go-toml v1.9.4 // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect @@ -26,10 +24,6 @@ require ( go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect go.uber.org/zap v1.21.0 // indirect - golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d // indirect - golang.org/x/sys 
v0.0.0-20220114195835-da31bd327af9 // indirect - golang.org/x/text v0.3.7 // indirect - google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect - google.golang.org/grpc v1.45.0 // indirect - google.golang.org/protobuf v1.28.0 // indirect ) + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/exporter/parquetexporter/go.sum b/exporter/parquetexporter/go.sum index 28c269e10bb6..b67d370d0391 100644 --- a/exporter/parquetexporter/go.sum +++ b/exporter/parquetexporter/go.sum @@ -1,8 +1,5 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= @@ -18,35 +15,23 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= -github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/davecgh/go-spew v1.1.0/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= @@ -63,18 +48,13 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= 
-github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -83,12 +63,10 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -117,8 +95,8 @@ github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= @@ -154,18 +132,13 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rhnvrm/simples3 v0.6.1/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod 
h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= @@ -175,18 +148,17 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -210,20 +182,15 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net 
v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -239,22 +206,17 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term 
v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -275,22 +237,14 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -300,17 +254,11 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= 
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/exporter/prometheusexporter/accumulator.go b/exporter/prometheusexporter/accumulator.go index c68f45219d85..2ccd5c7e4f19 100644 --- a/exporter/prometheusexporter/accumulator.go +++ b/exporter/prometheusexporter/accumulator.go @@ -20,30 +20,31 @@ import ( "sync" "time" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" ) type accumulatedValue struct { // value contains a metric with exactly one aggregated datapoint. - value pdata.Metric + value pmetric.Metric // resourceAttrs contain the resource attributes. They are used to output instance and job labels. - resourceAttrs pdata.Map + resourceAttrs pcommon.Map // updated indicates when metric was last changed. updated time.Time - instrumentationLibrary pdata.InstrumentationScope + instrumentationLibrary pcommon.InstrumentationScope } // accumulator stores aggragated values of incoming metrics type accumulator interface { // Accumulate stores aggragated metric values - Accumulate(resourceMetrics pdata.ResourceMetrics) (processed int) + Accumulate(resourceMetrics pmetric.ResourceMetrics) (processed int) // Collect returns a slice with relevant aggregated metrics and their resource attributes. // The number or metrics and attributes returned will be the same. 
- Collect() (metrics []pdata.Metric, resourceAttrs []pdata.Map) + Collect() (metrics []pmetric.Metric, resourceAttrs []pcommon.Map) } // LastValueAccumulator keeps last value for accumulated metrics @@ -66,7 +67,7 @@ func newAccumulator(logger *zap.Logger, metricExpiration time.Duration) accumula } // Accumulate stores one datapoint per metric -func (a *lastValueAccumulator) Accumulate(rm pdata.ResourceMetrics) (n int) { +func (a *lastValueAccumulator) Accumulate(rm pmetric.ResourceMetrics) (n int) { now := time.Now() ilms := rm.ScopeMetrics() resourceAttrs := rm.Resource().Attributes() @@ -83,17 +84,17 @@ func (a *lastValueAccumulator) Accumulate(rm pdata.ResourceMetrics) (n int) { return } -func (a *lastValueAccumulator) addMetric(metric pdata.Metric, il pdata.InstrumentationScope, resourceAttrs pdata.Map, now time.Time) int { +func (a *lastValueAccumulator) addMetric(metric pmetric.Metric, il pcommon.InstrumentationScope, resourceAttrs pcommon.Map, now time.Time) int { a.logger.Debug(fmt.Sprintf("accumulating metric: %s", metric.Name())) switch metric.DataType() { - case pdata.MetricDataTypeGauge: + case pmetric.MetricDataTypeGauge: return a.accumulateGauge(metric, il, resourceAttrs, now) - case pdata.MetricDataTypeSum: + case pmetric.MetricDataTypeSum: return a.accumulateSum(metric, il, resourceAttrs, now) - case pdata.MetricDataTypeHistogram: + case pmetric.MetricDataTypeHistogram: return a.accumulateDoubleHistogram(metric, il, resourceAttrs, now) - case pdata.MetricDataTypeSummary: + case pmetric.MetricDataTypeSummary: return a.accumulateSummary(metric, il, resourceAttrs, now) default: a.logger.With( @@ -105,13 +106,13 @@ func (a *lastValueAccumulator) addMetric(metric pdata.Metric, il pdata.Instrumen return 0 } -func (a *lastValueAccumulator) accumulateSummary(metric pdata.Metric, il pdata.InstrumentationScope, resourceAttrs pdata.Map, now time.Time) (n int) { +func (a *lastValueAccumulator) accumulateSummary(metric pmetric.Metric, il pcommon.InstrumentationScope, resourceAttrs pcommon.Map, now time.Time) (n int) { dps := metric.Summary().DataPoints() for i := 0; i < dps.Len(); i++ { ip := dps.At(i) signature := timeseriesSignature(il.Name(), metric, ip.Attributes()) - if ip.Flags().HasFlag(pdata.MetricDataPointFlagNoRecordedValue) { + if ip.Flags().HasFlag(pmetric.MetricDataPointFlagNoRecordedValue) { a.registeredMetrics.Delete(signature) return 0 } @@ -134,13 +135,13 @@ func (a *lastValueAccumulator) accumulateSummary(metric pdata.Metric, il pdata.I return n } -func (a *lastValueAccumulator) accumulateGauge(metric pdata.Metric, il pdata.InstrumentationScope, resourceAttrs pdata.Map, now time.Time) (n int) { +func (a *lastValueAccumulator) accumulateGauge(metric pmetric.Metric, il pcommon.InstrumentationScope, resourceAttrs pcommon.Map, now time.Time) (n int) { dps := metric.Gauge().DataPoints() for i := 0; i < dps.Len(); i++ { ip := dps.At(i) signature := timeseriesSignature(il.Name(), metric, ip.Attributes()) - if ip.Flags().HasFlag(pdata.MetricDataPointFlagNoRecordedValue) { + if ip.Flags().HasFlag(pmetric.MetricDataPointFlagNoRecordedValue) { a.registeredMetrics.Delete(signature) return 0 } @@ -168,11 +169,11 @@ func (a *lastValueAccumulator) accumulateGauge(metric pdata.Metric, il pdata.Ins return } -func (a *lastValueAccumulator) accumulateSum(metric pdata.Metric, il pdata.InstrumentationScope, resourceAttrs pdata.Map, now time.Time) (n int) { +func (a *lastValueAccumulator) accumulateSum(metric pmetric.Metric, il pcommon.InstrumentationScope, resourceAttrs pcommon.Map, now 
time.Time) (n int) { doubleSum := metric.Sum() // Drop metrics with non-cumulative aggregations - if doubleSum.AggregationTemporality() != pdata.MetricAggregationTemporalityCumulative { + if doubleSum.AggregationTemporality() != pmetric.MetricAggregationTemporalityCumulative { return } @@ -181,7 +182,7 @@ func (a *lastValueAccumulator) accumulateSum(metric pdata.Metric, il pdata.Instr ip := dps.At(i) signature := timeseriesSignature(il.Name(), metric, ip.Attributes()) - if ip.Flags().HasFlag(pdata.MetricDataPointFlagNoRecordedValue) { + if ip.Flags().HasFlag(pmetric.MetricDataPointFlagNoRecordedValue) { a.registeredMetrics.Delete(signature) return 0 } @@ -190,7 +191,7 @@ func (a *lastValueAccumulator) accumulateSum(metric pdata.Metric, il pdata.Instr if !ok { m := createMetric(metric) m.Sum().SetIsMonotonic(metric.Sum().IsMonotonic()) - m.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) ip.CopyTo(m.Sum().DataPoints().AppendEmpty()) a.registeredMetrics.Store(signature, &accumulatedValue{value: m, resourceAttrs: resourceAttrs, instrumentationLibrary: il, updated: now}) n++ @@ -205,7 +206,7 @@ func (a *lastValueAccumulator) accumulateSum(metric pdata.Metric, il pdata.Instr m := createMetric(metric) m.Sum().SetIsMonotonic(metric.Sum().IsMonotonic()) - m.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) ip.CopyTo(m.Sum().DataPoints().AppendEmpty()) a.registeredMetrics.Store(signature, &accumulatedValue{value: m, resourceAttrs: resourceAttrs, instrumentationLibrary: il, updated: now}) n++ @@ -213,11 +214,11 @@ func (a *lastValueAccumulator) accumulateSum(metric pdata.Metric, il pdata.Instr return } -func (a *lastValueAccumulator) accumulateDoubleHistogram(metric pdata.Metric, il pdata.InstrumentationScope, resourceAttrs pdata.Map, now time.Time) (n int) { +func (a *lastValueAccumulator) accumulateDoubleHistogram(metric pmetric.Metric, il pcommon.InstrumentationScope, resourceAttrs pcommon.Map, now time.Time) (n int) { doubleHistogram := metric.Histogram() // Drop metrics with non-cumulative aggregations - if doubleHistogram.AggregationTemporality() != pdata.MetricAggregationTemporalityCumulative { + if doubleHistogram.AggregationTemporality() != pmetric.MetricAggregationTemporalityCumulative { return } @@ -226,7 +227,7 @@ func (a *lastValueAccumulator) accumulateDoubleHistogram(metric pdata.Metric, il ip := dps.At(i) signature := timeseriesSignature(il.Name(), metric, ip.Attributes()) - if ip.Flags().HasFlag(pdata.MetricDataPointFlagNoRecordedValue) { + if ip.Flags().HasFlag(pmetric.MetricDataPointFlagNoRecordedValue) { a.registeredMetrics.Delete(signature) return 0 } @@ -248,7 +249,7 @@ func (a *lastValueAccumulator) accumulateDoubleHistogram(metric pdata.Metric, il m := createMetric(metric) ip.CopyTo(m.Histogram().DataPoints().AppendEmpty()) - m.Histogram().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.Histogram().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) a.registeredMetrics.Store(signature, &accumulatedValue{value: m, resourceAttrs: resourceAttrs, instrumentationLibrary: il, updated: now}) n++ } @@ -256,11 +257,11 @@ func (a *lastValueAccumulator) accumulateDoubleHistogram(metric pdata.Metric, il } // Collect returns a slice with relevant aggregated metrics and their resource attributes. 
-func (a *lastValueAccumulator) Collect() ([]pdata.Metric, []pdata.Map) { +func (a *lastValueAccumulator) Collect() ([]pmetric.Metric, []pcommon.Map) { a.logger.Debug("Accumulator collect called") - var metrics []pdata.Metric - var resourceAttrs []pdata.Map + var metrics []pmetric.Metric + var resourceAttrs []pcommon.Map expirationTime := time.Now().Add(-a.metricExpiration) a.registeredMetrics.Range(func(key, value interface{}) bool { @@ -279,20 +280,20 @@ func (a *lastValueAccumulator) Collect() ([]pdata.Metric, []pdata.Map) { return metrics, resourceAttrs } -func timeseriesSignature(ilmName string, metric pdata.Metric, attributes pdata.Map) string { +func timeseriesSignature(ilmName string, metric pmetric.Metric, attributes pcommon.Map) string { var b strings.Builder b.WriteString(metric.DataType().String()) b.WriteString("*" + ilmName) b.WriteString("*" + metric.Name()) - attributes.Sort().Range(func(k string, v pdata.Value) bool { + attributes.Sort().Range(func(k string, v pcommon.Value) bool { b.WriteString("*" + k + "*" + v.AsString()) return true }) return b.String() } -func createMetric(metric pdata.Metric) pdata.Metric { - m := pdata.NewMetric() +func createMetric(metric pmetric.Metric) pmetric.Metric { + m := pmetric.NewMetric() m.SetName(metric.Name()) m.SetDescription(metric.Description()) m.SetUnit(metric.Unit()) diff --git a/exporter/prometheusexporter/accumulator_test.go b/exporter/prometheusexporter/accumulator_test.go index 274887233f11..bc7b6feeab87 100644 --- a/exporter/prometheusexporter/accumulator_test.go +++ b/exporter/prometheusexporter/accumulator_test.go @@ -21,55 +21,56 @@ import ( "time" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" ) func TestInvalidDataType(t *testing.T) { a := newAccumulator(zap.NewNop(), 1*time.Hour).(*lastValueAccumulator) - metric := pdata.NewMetric() + metric := pmetric.NewMetric() metric.SetDataType(-100) - n := a.addMetric(metric, pdata.NewInstrumentationScope(), pdata.NewMap(), time.Now()) + n := a.addMetric(metric, pcommon.NewInstrumentationScope(), pcommon.NewMap(), time.Now()) require.Zero(t, n) } func TestAccumulateDeltaAggregation(t *testing.T) { tests := []struct { name string - fillMetric func(time.Time, pdata.Metric) + fillMetric func(time.Time, pmetric.Metric) }{ { name: "IntSum", - fillMetric: func(ts time.Time, metric pdata.Metric) { + fillMetric: func(ts time.Time, metric pmetric.Metric) { metric.SetName("test_metric") - metric.SetDataType(pdata.MetricDataTypeSum) - metric.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityDelta) + metric.SetDataType(pmetric.MetricDataTypeSum) + metric.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityDelta) dp := metric.Sum().DataPoints().AppendEmpty() dp.SetIntVal(42) dp.Attributes().InsertString("label_1", "1") dp.Attributes().InsertString("label_2", "2") - dp.SetTimestamp(pdata.NewTimestampFromTime(ts)) + dp.SetTimestamp(pcommon.NewTimestampFromTime(ts)) }, }, { name: "Sum", - fillMetric: func(ts time.Time, metric pdata.Metric) { + fillMetric: func(ts time.Time, metric pmetric.Metric) { metric.SetName("test_metric") - metric.SetDataType(pdata.MetricDataTypeSum) - metric.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityDelta) + metric.SetDataType(pmetric.MetricDataTypeSum) + metric.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityDelta) dp := 
metric.Sum().DataPoints().AppendEmpty() dp.SetDoubleVal(42.42) dp.Attributes().InsertString("label_1", "1") dp.Attributes().InsertString("label_2", "2") - dp.SetTimestamp(pdata.NewTimestampFromTime(ts)) + dp.SetTimestamp(pcommon.NewTimestampFromTime(ts)) }, }, { name: "Histogram", - fillMetric: func(ts time.Time, metric pdata.Metric) { + fillMetric: func(ts time.Time, metric pmetric.Metric) { metric.SetName("test_metric") - metric.SetDataType(pdata.MetricDataTypeHistogram) - metric.Histogram().SetAggregationTemporality(pdata.MetricAggregationTemporalityDelta) + metric.SetDataType(pmetric.MetricDataTypeHistogram) + metric.Histogram().SetAggregationTemporality(pmetric.MetricAggregationTemporalityDelta) metric.SetDescription("test description") dp := metric.Histogram().DataPoints().AppendEmpty() dp.SetBucketCounts([]uint64{5, 2}) @@ -78,14 +79,14 @@ func TestAccumulateDeltaAggregation(t *testing.T) { dp.SetSum(42.42) dp.Attributes().InsertString("label_1", "1") dp.Attributes().InsertString("label_2", "2") - dp.SetTimestamp(pdata.NewTimestampFromTime(ts)) + dp.SetTimestamp(pcommon.NewTimestampFromTime(ts)) }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - resourceMetrics := pdata.NewResourceMetrics() + resourceMetrics := pmetric.NewResourceMetrics() ilm := resourceMetrics.ScopeMetrics().AppendEmpty() ilm.Scope().SetName("test") tt.fillMetric(time.Now(), ilm.Metrics().AppendEmpty()) @@ -94,7 +95,7 @@ func TestAccumulateDeltaAggregation(t *testing.T) { n := a.Accumulate(resourceMetrics) require.Equal(t, 0, n) - signature := timeseriesSignature(ilm.Scope().Name(), ilm.Metrics().At(0), pdata.NewMap()) + signature := timeseriesSignature(ilm.Scope().Name(), ilm.Metrics().At(0), pcommon.NewMap()) v, ok := a.registeredMetrics.Load(signature) require.False(t, ok) require.Nil(t, v) @@ -105,107 +106,107 @@ func TestAccumulateDeltaAggregation(t *testing.T) { func TestAccumulateMetrics(t *testing.T) { tests := []struct { name string - metric func(time.Time, float64, pdata.MetricSlice) + metric func(time.Time, float64, pmetric.MetricSlice) }{ { name: "IntGauge", - metric: func(ts time.Time, v float64, metrics pdata.MetricSlice) { + metric: func(ts time.Time, v float64, metrics pmetric.MetricSlice) { metric := metrics.AppendEmpty() metric.SetName("test_metric") - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) metric.SetDescription("test description") dp := metric.Gauge().DataPoints().AppendEmpty() dp.SetIntVal(int64(v)) dp.Attributes().InsertString("label_1", "1") dp.Attributes().InsertString("label_2", "2") - dp.SetTimestamp(pdata.NewTimestampFromTime(ts)) + dp.SetTimestamp(pcommon.NewTimestampFromTime(ts)) }, }, { name: "Gauge", - metric: func(ts time.Time, v float64, metrics pdata.MetricSlice) { + metric: func(ts time.Time, v float64, metrics pmetric.MetricSlice) { metric := metrics.AppendEmpty() metric.SetName("test_metric") - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) metric.SetDescription("test description") dp := metric.Gauge().DataPoints().AppendEmpty() dp.SetDoubleVal(v) dp.Attributes().InsertString("label_1", "1") dp.Attributes().InsertString("label_2", "2") - dp.SetTimestamp(pdata.NewTimestampFromTime(ts)) + dp.SetTimestamp(pcommon.NewTimestampFromTime(ts)) }, }, { name: "IntSum", - metric: func(ts time.Time, v float64, metrics pdata.MetricSlice) { + metric: func(ts time.Time, v float64, metrics pmetric.MetricSlice) { metric := metrics.AppendEmpty() 
metric.SetName("test_metric") - metric.SetDataType(pdata.MetricDataTypeSum) + metric.SetDataType(pmetric.MetricDataTypeSum) metric.SetDescription("test description") metric.Sum().SetIsMonotonic(false) - metric.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + metric.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) dp := metric.Sum().DataPoints().AppendEmpty() dp.SetIntVal(int64(v)) dp.Attributes().InsertString("label_1", "1") dp.Attributes().InsertString("label_2", "2") - dp.SetTimestamp(pdata.NewTimestampFromTime(ts)) + dp.SetTimestamp(pcommon.NewTimestampFromTime(ts)) }, }, { name: "Sum", - metric: func(ts time.Time, v float64, metrics pdata.MetricSlice) { + metric: func(ts time.Time, v float64, metrics pmetric.MetricSlice) { metric := metrics.AppendEmpty() metric.SetName("test_metric") - metric.SetDataType(pdata.MetricDataTypeSum) + metric.SetDataType(pmetric.MetricDataTypeSum) metric.SetDescription("test description") metric.Sum().SetIsMonotonic(false) - metric.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + metric.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) dp := metric.Sum().DataPoints().AppendEmpty() dp.SetDoubleVal(v) dp.Attributes().InsertString("label_1", "1") dp.Attributes().InsertString("label_2", "2") - dp.SetTimestamp(pdata.NewTimestampFromTime(ts)) + dp.SetTimestamp(pcommon.NewTimestampFromTime(ts)) }, }, { name: "MonotonicIntSum", - metric: func(ts time.Time, v float64, metrics pdata.MetricSlice) { + metric: func(ts time.Time, v float64, metrics pmetric.MetricSlice) { metric := metrics.AppendEmpty() metric.SetName("test_metric") - metric.SetDataType(pdata.MetricDataTypeSum) + metric.SetDataType(pmetric.MetricDataTypeSum) metric.SetDescription("test description") metric.Sum().SetIsMonotonic(true) - metric.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + metric.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) dp := metric.Sum().DataPoints().AppendEmpty() dp.SetIntVal(int64(v)) dp.Attributes().InsertString("label_1", "1") dp.Attributes().InsertString("label_2", "2") - dp.SetTimestamp(pdata.NewTimestampFromTime(ts)) + dp.SetTimestamp(pcommon.NewTimestampFromTime(ts)) }, }, { name: "MonotonicSum", - metric: func(ts time.Time, v float64, metrics pdata.MetricSlice) { + metric: func(ts time.Time, v float64, metrics pmetric.MetricSlice) { metric := metrics.AppendEmpty() metric.SetName("test_metric") - metric.SetDataType(pdata.MetricDataTypeSum) + metric.SetDataType(pmetric.MetricDataTypeSum) metric.Sum().SetIsMonotonic(true) - metric.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + metric.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) metric.SetDescription("test description") dp := metric.Sum().DataPoints().AppendEmpty() dp.SetDoubleVal(v) dp.Attributes().InsertString("label_1", "1") dp.Attributes().InsertString("label_2", "2") - dp.SetTimestamp(pdata.NewTimestampFromTime(ts)) + dp.SetTimestamp(pcommon.NewTimestampFromTime(ts)) }, }, { name: "Histogram", - metric: func(ts time.Time, v float64, metrics pdata.MetricSlice) { + metric: func(ts time.Time, v float64, metrics pmetric.MetricSlice) { metric := metrics.AppendEmpty() metric.SetName("test_metric") - metric.SetDataType(pdata.MetricDataTypeHistogram) - metric.Histogram().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + 
metric.SetDataType(pmetric.MetricDataTypeHistogram) + metric.Histogram().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) metric.SetDescription("test description") dp := metric.Histogram().DataPoints().AppendEmpty() dp.SetBucketCounts([]uint64{5, 2}) @@ -214,15 +215,15 @@ func TestAccumulateMetrics(t *testing.T) { dp.SetSum(v) dp.Attributes().InsertString("label_1", "1") dp.Attributes().InsertString("label_2", "2") - dp.SetTimestamp(pdata.NewTimestampFromTime(ts)) + dp.SetTimestamp(pcommon.NewTimestampFromTime(ts)) }, }, { name: "Summary", - metric: func(ts time.Time, v float64, metrics pdata.MetricSlice) { + metric: func(ts time.Time, v float64, metrics pmetric.MetricSlice) { metric := metrics.AppendEmpty() metric.SetName("test_metric") - metric.SetDataType(pdata.MetricDataTypeSummary) + metric.SetDataType(pmetric.MetricDataTypeSummary) metric.SetDescription("test description") dp := metric.Summary().DataPoints().AppendEmpty() dp.SetCount(10) @@ -230,8 +231,8 @@ func TestAccumulateMetrics(t *testing.T) { dp.SetCount(10) dp.Attributes().InsertString("label_1", "1") dp.Attributes().InsertString("label_2", "2") - dp.SetTimestamp(pdata.NewTimestampFromTime(ts)) - fillQuantileValue := func(pN, value float64, dest pdata.ValueAtQuantile) { + dp.SetTimestamp(pcommon.NewTimestampFromTime(ts)) + fillQuantileValue := func(pN, value float64, dest pmetric.ValueAtQuantile) { dest.SetQuantile(pN) dest.SetValue(value) } @@ -241,43 +242,43 @@ func TestAccumulateMetrics(t *testing.T) { }, { name: "StalenessMarkerGauge", - metric: func(ts time.Time, v float64, metrics pdata.MetricSlice) { + metric: func(ts time.Time, v float64, metrics pmetric.MetricSlice) { metric := metrics.AppendEmpty() metric.SetName("test_metric") - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) metric.SetDescription("test description") dp := metric.Gauge().DataPoints().AppendEmpty() dp.SetDoubleVal(v) dp.Attributes().InsertString("label_1", "1") dp.Attributes().InsertString("label_2", "2") - dp.SetTimestamp(pdata.NewTimestampFromTime(ts)) - dp.SetFlags(pdata.MetricDataPointFlags(pdata.MetricDataPointFlagNoRecordedValue)) + dp.SetTimestamp(pcommon.NewTimestampFromTime(ts)) + dp.SetFlags(pmetric.MetricDataPointFlags(pmetric.MetricDataPointFlagNoRecordedValue)) }, }, { name: "StalenessMarkerSum", - metric: func(ts time.Time, v float64, metrics pdata.MetricSlice) { + metric: func(ts time.Time, v float64, metrics pmetric.MetricSlice) { metric := metrics.AppendEmpty() metric.SetName("test_metric") - metric.SetDataType(pdata.MetricDataTypeSum) + metric.SetDataType(pmetric.MetricDataTypeSum) metric.SetDescription("test description") metric.Sum().SetIsMonotonic(false) - metric.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + metric.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) dp := metric.Sum().DataPoints().AppendEmpty() dp.SetDoubleVal(v) dp.Attributes().InsertString("label_1", "1") dp.Attributes().InsertString("label_2", "2") - dp.SetTimestamp(pdata.NewTimestampFromTime(ts)) - dp.SetFlags(pdata.MetricDataPointFlags(pdata.MetricDataPointFlagNoRecordedValue)) + dp.SetTimestamp(pcommon.NewTimestampFromTime(ts)) + dp.SetFlags(pmetric.MetricDataPointFlags(pmetric.MetricDataPointFlagNoRecordedValue)) }, }, { name: "StalenessMarkerHistogram", - metric: func(ts time.Time, v float64, metrics pdata.MetricSlice) { + metric: func(ts time.Time, v float64, metrics pmetric.MetricSlice) { metric := metrics.AppendEmpty() 
metric.SetName("test_metric") - metric.SetDataType(pdata.MetricDataTypeHistogram) - metric.Histogram().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + metric.SetDataType(pmetric.MetricDataTypeHistogram) + metric.Histogram().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) metric.SetDescription("test description") dp := metric.Histogram().DataPoints().AppendEmpty() dp.SetBucketCounts([]uint64{5, 2}) @@ -286,16 +287,16 @@ func TestAccumulateMetrics(t *testing.T) { dp.SetSum(v) dp.Attributes().InsertString("label_1", "1") dp.Attributes().InsertString("label_2", "2") - dp.SetTimestamp(pdata.NewTimestampFromTime(ts)) - dp.SetFlags(pdata.MetricDataPointFlags(pdata.MetricDataPointFlagNoRecordedValue)) + dp.SetTimestamp(pcommon.NewTimestampFromTime(ts)) + dp.SetFlags(pmetric.MetricDataPointFlags(pmetric.MetricDataPointFlagNoRecordedValue)) }, }, { name: "StalenessMarkerSummary", - metric: func(ts time.Time, v float64, metrics pdata.MetricSlice) { + metric: func(ts time.Time, v float64, metrics pmetric.MetricSlice) { metric := metrics.AppendEmpty() metric.SetName("test_metric") - metric.SetDataType(pdata.MetricDataTypeSummary) + metric.SetDataType(pmetric.MetricDataTypeSummary) metric.SetDescription("test description") dp := metric.Summary().DataPoints().AppendEmpty() dp.SetCount(10) @@ -303,9 +304,9 @@ func TestAccumulateMetrics(t *testing.T) { dp.SetCount(10) dp.Attributes().InsertString("label_1", "1") dp.Attributes().InsertString("label_2", "2") - dp.SetTimestamp(pdata.NewTimestampFromTime(ts)) - dp.SetFlags(pdata.MetricDataPointFlags(pdata.MetricDataPointFlagNoRecordedValue)) - fillQuantileValue := func(pN, value float64, dest pdata.ValueAtQuantile) { + dp.SetTimestamp(pcommon.NewTimestampFromTime(ts)) + dp.SetFlags(pmetric.MetricDataPointFlags(pmetric.MetricDataPointFlagNoRecordedValue)) + fillQuantileValue := func(pN, value float64, dest pmetric.ValueAtQuantile) { dest.SetQuantile(pN) dest.SetValue(value) } @@ -322,7 +323,7 @@ func TestAccumulateMetrics(t *testing.T) { ts2 := time.Now().Add(-2 * time.Second) ts3 := time.Now().Add(-1 * time.Second) - resourceMetrics2 := pdata.NewResourceMetrics() + resourceMetrics2 := pmetric.NewResourceMetrics() ilm2 := resourceMetrics2.ScopeMetrics().AppendEmpty() ilm2.Scope().SetName("test") tt.metric(ts2, 21, ilm2.Metrics()) @@ -349,7 +350,7 @@ func TestAccumulateMetrics(t *testing.T) { require.Equal(t, v.instrumentationLibrary.Name(), "test") require.Equal(t, v.value.DataType(), ilm2.Metrics().At(0).DataType()) - vLabels.Range(func(k string, v pdata.Value) bool { + vLabels.Range(func(k string, v pcommon.Value) bool { r, _ := m2Labels.Get(k) require.Equal(t, r, v) return true @@ -362,7 +363,7 @@ func TestAccumulateMetrics(t *testing.T) { require.Equal(t, m2IsMonotonic, vIsMonotonic) // 3 metrics arrived - resourceMetrics3 := pdata.NewResourceMetrics() + resourceMetrics3 := pmetric.NewResourceMetrics() ilm3 := resourceMetrics3.ScopeMetrics().AppendEmpty() ilm3.Scope().SetName("test") tt.metric(ts2, 21, ilm3.Metrics()) @@ -385,45 +386,45 @@ func TestAccumulateMetrics(t *testing.T) { } } -func getMetricProperties(metric pdata.Metric) ( - attributes pdata.Map, +func getMetricProperties(metric pmetric.Metric) ( + attributes pcommon.Map, ts time.Time, value float64, - temporality pdata.MetricAggregationTemporality, + temporality pmetric.MetricAggregationTemporality, isMonotonic bool, ) { switch metric.DataType() { - case pdata.MetricDataTypeGauge: + case pmetric.MetricDataTypeGauge: attributes = 
metric.Gauge().DataPoints().At(0).Attributes() ts = metric.Gauge().DataPoints().At(0).Timestamp().AsTime() dp := metric.Gauge().DataPoints().At(0) switch dp.ValueType() { - case pdata.MetricValueTypeInt: + case pmetric.MetricValueTypeInt: value = float64(dp.IntVal()) - case pdata.MetricValueTypeDouble: + case pmetric.MetricValueTypeDouble: value = dp.DoubleVal() } - temporality = pdata.MetricAggregationTemporalityUnspecified + temporality = pmetric.MetricAggregationTemporalityUnspecified isMonotonic = false - case pdata.MetricDataTypeSum: + case pmetric.MetricDataTypeSum: attributes = metric.Sum().DataPoints().At(0).Attributes() ts = metric.Sum().DataPoints().At(0).Timestamp().AsTime() dp := metric.Sum().DataPoints().At(0) switch dp.ValueType() { - case pdata.MetricValueTypeInt: + case pmetric.MetricValueTypeInt: value = float64(dp.IntVal()) - case pdata.MetricValueTypeDouble: + case pmetric.MetricValueTypeDouble: value = dp.DoubleVal() } temporality = metric.Sum().AggregationTemporality() isMonotonic = metric.Sum().IsMonotonic() - case pdata.MetricDataTypeHistogram: + case pmetric.MetricDataTypeHistogram: attributes = metric.Histogram().DataPoints().At(0).Attributes() ts = metric.Histogram().DataPoints().At(0).Timestamp().AsTime() value = metric.Histogram().DataPoints().At(0).Sum() temporality = metric.Histogram().AggregationTemporality() isMonotonic = true - case pdata.MetricDataTypeSummary: + case pmetric.MetricDataTypeSummary: attributes = metric.Summary().DataPoints().At(0).Attributes() ts = metric.Summary().DataPoints().At(0).Timestamp().AsTime() value = metric.Summary().DataPoints().At(0).Sum() diff --git a/exporter/prometheusexporter/collector.go b/exporter/prometheusexporter/collector.go index 4f8023ea29b5..7067afcb251d 100644 --- a/exporter/prometheusexporter/collector.go +++ b/exporter/prometheusexporter/collector.go @@ -20,8 +20,9 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" ) @@ -53,39 +54,39 @@ func (c *collector) Describe(_ chan<- *prometheus.Desc) {} /* Processing */ -func (c *collector) processMetrics(rm pdata.ResourceMetrics) (n int) { +func (c *collector) processMetrics(rm pmetric.ResourceMetrics) (n int) { return c.accumulator.Accumulate(rm) } var errUnknownMetricType = fmt.Errorf("unknown metric type") -func (c *collector) convertMetric(metric pdata.Metric, resourceAttrs pdata.Map) (prometheus.Metric, error) { +func (c *collector) convertMetric(metric pmetric.Metric, resourceAttrs pcommon.Map) (prometheus.Metric, error) { switch metric.DataType() { - case pdata.MetricDataTypeGauge: + case pmetric.MetricDataTypeGauge: return c.convertGauge(metric, resourceAttrs) - case pdata.MetricDataTypeSum: + case pmetric.MetricDataTypeSum: return c.convertSum(metric, resourceAttrs) - case pdata.MetricDataTypeHistogram: + case pmetric.MetricDataTypeHistogram: return c.convertDoubleHistogram(metric, resourceAttrs) - case pdata.MetricDataTypeSummary: + case pmetric.MetricDataTypeSummary: return c.convertSummary(metric, resourceAttrs) } return nil, errUnknownMetricType } -func (c *collector) metricName(namespace string, metric pdata.Metric) string { +func (c *collector) metricName(namespace string, metric pmetric.Metric) string { if namespace != "" { return namespace + "_" + sanitize(metric.Name(), 
c.skipSanitizeLabel) } return sanitize(metric.Name(), c.skipSanitizeLabel) } -func (c *collector) getMetricMetadata(metric pdata.Metric, attributes pdata.Map, resourceAttrs pdata.Map) (*prometheus.Desc, []string) { +func (c *collector) getMetricMetadata(metric pmetric.Metric, attributes pcommon.Map, resourceAttrs pcommon.Map) (*prometheus.Desc, []string) { keys := make([]string, 0, attributes.Len()+2) // +2 for job and instance labels. values := make([]string, 0, attributes.Len()+2) - attributes.Range(func(k string, v pdata.Value) bool { + attributes.Range(func(k string, v pcommon.Value) bool { keys = append(keys, sanitize(k, c.skipSanitizeLabel)) values = append(values, v.AsString()) return true @@ -114,15 +115,15 @@ func (c *collector) getMetricMetadata(metric pdata.Metric, attributes pdata.Map, ), values } -func (c *collector) convertGauge(metric pdata.Metric, resourceAttrs pdata.Map) (prometheus.Metric, error) { +func (c *collector) convertGauge(metric pmetric.Metric, resourceAttrs pcommon.Map) (prometheus.Metric, error) { ip := metric.Gauge().DataPoints().At(0) desc, attributes := c.getMetricMetadata(metric, ip.Attributes(), resourceAttrs) var value float64 switch ip.ValueType() { - case pdata.MetricValueTypeInt: + case pmetric.MetricValueTypeInt: value = float64(ip.IntVal()) - case pdata.MetricValueTypeDouble: + case pmetric.MetricValueTypeDouble: value = ip.DoubleVal() } m, err := prometheus.NewConstMetric(desc, prometheus.GaugeValue, value, attributes...) @@ -136,7 +137,7 @@ func (c *collector) convertGauge(metric pdata.Metric, resourceAttrs pdata.Map) ( return m, nil } -func (c *collector) convertSum(metric pdata.Metric, resourceAttrs pdata.Map) (prometheus.Metric, error) { +func (c *collector) convertSum(metric pmetric.Metric, resourceAttrs pcommon.Map) (prometheus.Metric, error) { ip := metric.Sum().DataPoints().At(0) metricType := prometheus.GaugeValue @@ -147,9 +148,9 @@ func (c *collector) convertSum(metric pdata.Metric, resourceAttrs pdata.Map) (pr desc, attributes := c.getMetricMetadata(metric, ip.Attributes(), resourceAttrs) var value float64 switch ip.ValueType() { - case pdata.MetricValueTypeInt: + case pmetric.MetricValueTypeInt: value = float64(ip.IntVal()) - case pdata.MetricValueTypeDouble: + case pmetric.MetricValueTypeDouble: value = ip.DoubleVal() } m, err := prometheus.NewConstMetric(desc, metricType, value, attributes...) @@ -163,7 +164,7 @@ func (c *collector) convertSum(metric pdata.Metric, resourceAttrs pdata.Map) (pr return m, nil } -func (c *collector) convertSummary(metric pdata.Metric, resourceAttrs pdata.Map) (prometheus.Metric, error) { +func (c *collector) convertSummary(metric pmetric.Metric, resourceAttrs pcommon.Map) (prometheus.Metric, error) { // TODO: In the off chance that we have multiple points // within the same metric, how should we handle them? 
point := metric.Summary().DataPoints().At(0) @@ -187,7 +188,7 @@ func (c *collector) convertSummary(metric pdata.Metric, resourceAttrs pdata.Map) return m, nil } -func (c *collector) convertDoubleHistogram(metric pdata.Metric, resourceAttrs pdata.Map) (prometheus.Metric, error) { +func (c *collector) convertDoubleHistogram(metric pmetric.Metric, resourceAttrs pcommon.Map) (prometheus.Metric, error) { ip := metric.Histogram().DataPoints().At(0) desc, attributes := c.getMetricMetadata(metric, ip.Attributes(), resourceAttrs) diff --git a/exporter/prometheusexporter/collector_test.go b/exporter/prometheusexporter/collector_test.go index 19d8d7bfc75d..cf34de88f885 100644 --- a/exporter/prometheusexporter/collector_test.go +++ b/exporter/prometheusexporter/collector_test.go @@ -21,23 +21,24 @@ import ( "github.com/prometheus/client_golang/prometheus" io_prometheus_client "github.com/prometheus/client_model/go" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" "go.uber.org/zap/zapcore" ) type mockAccumulator struct { - metrics []pdata.Metric - resourceAttributes pdata.Map // Same attributes for all metrics. + metrics []pmetric.Metric + resourceAttributes pcommon.Map // Same attributes for all metrics. } -func (a *mockAccumulator) Accumulate(pdata.ResourceMetrics) (n int) { +func (a *mockAccumulator) Accumulate(pmetric.ResourceMetrics) (n int) { return 0 } -func (a *mockAccumulator) Collect() ([]pdata.Metric, []pdata.Map) { - rAttrs := make([]pdata.Map, len(a.metrics)) +func (a *mockAccumulator) Collect() ([]pmetric.Metric, []pcommon.Map) { + rAttrs := make([]pcommon.Map, len(a.metrics)) for i := range rAttrs { rAttrs[i] = a.resourceAttributes } @@ -46,17 +47,17 @@ func (a *mockAccumulator) Collect() ([]pdata.Metric, []pdata.Map) { } func TestConvertInvalidDataType(t *testing.T) { - metric := pdata.NewMetric() + metric := pmetric.NewMetric() metric.SetDataType(-100) c := collector{ accumulator: &mockAccumulator{ - []pdata.Metric{metric}, - pdata.NewMap(), + []pmetric.Metric{metric}, + pcommon.NewMap(), }, logger: zap.NewNop(), } - _, err := c.convertMetric(metric, pdata.NewMap()) + _, err := c.convertMetric(metric, pcommon.NewMap()) require.Equal(t, errUnknownMetricType, err) ch := make(chan prometheus.Metric, 1) @@ -73,24 +74,24 @@ func TestConvertInvalidDataType(t *testing.T) { } func TestConvertInvalidMetric(t *testing.T) { - for _, mType := range []pdata.MetricDataType{ - pdata.MetricDataTypeHistogram, - pdata.MetricDataTypeSum, - pdata.MetricDataTypeGauge, + for _, mType := range []pmetric.MetricDataType{ + pmetric.MetricDataTypeHistogram, + pmetric.MetricDataTypeSum, + pmetric.MetricDataTypeGauge, } { - metric := pdata.NewMetric() + metric := pmetric.NewMetric() metric.SetDataType(mType) switch metric.DataType() { - case pdata.MetricDataTypeGauge: + case pmetric.MetricDataTypeGauge: metric.Gauge().DataPoints().AppendEmpty() - case pdata.MetricDataTypeSum: + case pmetric.MetricDataTypeSum: metric.Sum().DataPoints().AppendEmpty() - case pdata.MetricDataTypeHistogram: + case pmetric.MetricDataTypeHistogram: metric.Histogram().DataPoints().AppendEmpty() } c := collector{} - _, err := c.convertMetric(metric, pdata.NewMap()) + _, err := c.convertMetric(metric, pcommon.NewMap()) require.Error(t, err) } } @@ -117,22 +118,22 @@ func (c *errorCheckCore) Write(ent 
zapcore.Entry, _ []zapcore.Field) error { func (*errorCheckCore) Sync() error { return nil } func TestCollectMetricsLabelSanitize(t *testing.T) { - metric := pdata.NewMetric() + metric := pmetric.NewMetric() metric.SetName("test_metric") - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) metric.SetDescription("test description") dp := metric.Gauge().DataPoints().AppendEmpty() dp.SetIntVal(42) dp.Attributes().InsertString("label.1", "1") dp.Attributes().InsertString("label/2", "2") - dp.SetTimestamp(pdata.NewTimestampFromTime(time.Now())) + dp.SetTimestamp(pcommon.NewTimestampFromTime(time.Now())) loggerCore := errorCheckCore{} c := collector{ namespace: "test_space", accumulator: &mockAccumulator{ - []pdata.Metric{metric}, - pdata.NewMap(), + []pmetric.Metric{metric}, + pcommon.NewMap(), }, sendTimestamps: false, logger: zap.New(&loggerCore), @@ -163,7 +164,7 @@ func TestCollectMetricsLabelSanitize(t *testing.T) { func TestCollectMetrics(t *testing.T) { tests := []struct { name string - metric func(time.Time) pdata.Metric + metric func(time.Time) pmetric.Metric metricType prometheus.ValueType value float64 }{ @@ -171,16 +172,16 @@ func TestCollectMetrics(t *testing.T) { name: "IntGauge", metricType: prometheus.GaugeValue, value: 42.0, - metric: func(ts time.Time) (metric pdata.Metric) { - metric = pdata.NewMetric() + metric: func(ts time.Time) (metric pmetric.Metric) { + metric = pmetric.NewMetric() metric.SetName("test_metric") - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) metric.SetDescription("test description") dp := metric.Gauge().DataPoints().AppendEmpty() dp.SetIntVal(42) dp.Attributes().InsertString("label_1", "1") dp.Attributes().InsertString("label_2", "2") - dp.SetTimestamp(pdata.NewTimestampFromTime(ts)) + dp.SetTimestamp(pcommon.NewTimestampFromTime(ts)) return }, @@ -189,16 +190,16 @@ func TestCollectMetrics(t *testing.T) { name: "Gauge", metricType: prometheus.GaugeValue, value: 42.42, - metric: func(ts time.Time) (metric pdata.Metric) { - metric = pdata.NewMetric() + metric: func(ts time.Time) (metric pmetric.Metric) { + metric = pmetric.NewMetric() metric.SetName("test_metric") - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) metric.SetDescription("test description") dp := metric.Gauge().DataPoints().AppendEmpty() dp.SetDoubleVal(42.42) dp.Attributes().InsertString("label_1", "1") dp.Attributes().InsertString("label_2", "2") - dp.SetTimestamp(pdata.NewTimestampFromTime(ts)) + dp.SetTimestamp(pcommon.NewTimestampFromTime(ts)) return }, @@ -207,18 +208,18 @@ func TestCollectMetrics(t *testing.T) { name: "IntSum", metricType: prometheus.GaugeValue, value: 42.0, - metric: func(ts time.Time) (metric pdata.Metric) { - metric = pdata.NewMetric() + metric: func(ts time.Time) (metric pmetric.Metric) { + metric = pmetric.NewMetric() metric.SetName("test_metric") - metric.SetDataType(pdata.MetricDataTypeSum) + metric.SetDataType(pmetric.MetricDataTypeSum) metric.Sum().SetIsMonotonic(false) - metric.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + metric.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) metric.SetDescription("test description") dp := metric.Sum().DataPoints().AppendEmpty() dp.SetIntVal(42) dp.Attributes().InsertString("label_1", "1") dp.Attributes().InsertString("label_2", "2") - dp.SetTimestamp(pdata.NewTimestampFromTime(ts)) + 
dp.SetTimestamp(pcommon.NewTimestampFromTime(ts)) return }, @@ -227,18 +228,18 @@ func TestCollectMetrics(t *testing.T) { name: "Sum", metricType: prometheus.GaugeValue, value: 42.42, - metric: func(ts time.Time) (metric pdata.Metric) { - metric = pdata.NewMetric() + metric: func(ts time.Time) (metric pmetric.Metric) { + metric = pmetric.NewMetric() metric.SetName("test_metric") - metric.SetDataType(pdata.MetricDataTypeSum) + metric.SetDataType(pmetric.MetricDataTypeSum) metric.Sum().SetIsMonotonic(false) - metric.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + metric.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) metric.SetDescription("test description") dp := metric.Sum().DataPoints().AppendEmpty() dp.SetDoubleVal(42.42) dp.Attributes().InsertString("label_1", "1") dp.Attributes().InsertString("label_2", "2") - dp.SetTimestamp(pdata.NewTimestampFromTime(ts)) + dp.SetTimestamp(pcommon.NewTimestampFromTime(ts)) return }, @@ -247,18 +248,18 @@ func TestCollectMetrics(t *testing.T) { name: "MonotonicIntSum", metricType: prometheus.CounterValue, value: 42.0, - metric: func(ts time.Time) (metric pdata.Metric) { - metric = pdata.NewMetric() + metric: func(ts time.Time) (metric pmetric.Metric) { + metric = pmetric.NewMetric() metric.SetName("test_metric") - metric.SetDataType(pdata.MetricDataTypeSum) + metric.SetDataType(pmetric.MetricDataTypeSum) metric.Sum().SetIsMonotonic(true) - metric.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + metric.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) metric.SetDescription("test description") dp := metric.Sum().DataPoints().AppendEmpty() dp.SetIntVal(42) dp.Attributes().InsertString("label_1", "1") dp.Attributes().InsertString("label_2", "2") - dp.SetTimestamp(pdata.NewTimestampFromTime(ts)) + dp.SetTimestamp(pcommon.NewTimestampFromTime(ts)) return }, @@ -267,18 +268,18 @@ func TestCollectMetrics(t *testing.T) { name: "MonotonicSum", metricType: prometheus.CounterValue, value: 42.42, - metric: func(ts time.Time) (metric pdata.Metric) { - metric = pdata.NewMetric() + metric: func(ts time.Time) (metric pmetric.Metric) { + metric = pmetric.NewMetric() metric.SetName("test_metric") - metric.SetDataType(pdata.MetricDataTypeSum) + metric.SetDataType(pmetric.MetricDataTypeSum) metric.Sum().SetIsMonotonic(true) - metric.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + metric.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) metric.SetDescription("test description") dp := metric.Sum().DataPoints().AppendEmpty() dp.SetDoubleVal(42.42) dp.Attributes().InsertString("label_1", "1") dp.Attributes().InsertString("label_2", "2") - dp.SetTimestamp(pdata.NewTimestampFromTime(ts)) + dp.SetTimestamp(pcommon.NewTimestampFromTime(ts)) return }, @@ -292,7 +293,7 @@ func TestCollectMetrics(t *testing.T) { name += "/WithTimestamp" } - rAttrs := pdata.NewMap() + rAttrs := pcommon.NewMap() rAttrs.InsertString(conventions.AttributeServiceInstanceID, "localhost:9090") rAttrs.InsertString(conventions.AttributeServiceName, "testapp") rAttrs.InsertString(conventions.AttributeServiceNamespace, "prod") @@ -303,7 +304,7 @@ func TestCollectMetrics(t *testing.T) { c := collector{ namespace: "test_space", accumulator: &mockAccumulator{ - []pdata.Metric{metric}, + []pmetric.Metric{metric}, rAttrs, }, sendTimestamps: sendTimestamp, @@ -358,7 +359,7 @@ func TestCollectMetrics(t *testing.T) { func 
TestAccumulateHistograms(t *testing.T) { tests := []struct { name string - metric func(time.Time) pdata.Metric + metric func(time.Time) pmetric.Metric histogramPoints map[float64]uint64 histogramSum float64 @@ -372,11 +373,11 @@ func TestAccumulateHistograms(t *testing.T) { }, histogramSum: 42.42, histogramCount: 7, - metric: func(ts time.Time) (metric pdata.Metric) { - metric = pdata.NewMetric() + metric: func(ts time.Time) (metric pmetric.Metric) { + metric = pmetric.NewMetric() metric.SetName("test_metric") - metric.SetDataType(pdata.MetricDataTypeHistogram) - metric.Histogram().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + metric.SetDataType(pmetric.MetricDataTypeHistogram) + metric.Histogram().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) metric.SetDescription("test description") dp := metric.Histogram().DataPoints().AppendEmpty() dp.SetBucketCounts([]uint64{5, 2}) @@ -385,7 +386,7 @@ func TestAccumulateHistograms(t *testing.T) { dp.SetSum(42.42) dp.Attributes().InsertString("label_1", "1") dp.Attributes().InsertString("label_2", "2") - dp.SetTimestamp(pdata.NewTimestampFromTime(ts)) + dp.SetTimestamp(pcommon.NewTimestampFromTime(ts)) return }, }, @@ -402,8 +403,8 @@ func TestAccumulateHistograms(t *testing.T) { metric := tt.metric(ts) c := collector{ accumulator: &mockAccumulator{ - []pdata.Metric{metric}, - pdata.NewMap(), + []pmetric.Metric{metric}, + pcommon.NewMap(), }, sendTimestamps: sendTimestamp, logger: zap.NewNop(), @@ -454,13 +455,13 @@ func TestAccumulateHistograms(t *testing.T) { } func TestAccumulateSummary(t *testing.T) { - fillQuantileValue := func(pN, value float64, dest pdata.ValueAtQuantile) { + fillQuantileValue := func(pN, value float64, dest pmetric.ValueAtQuantile) { dest.SetQuantile(pN) dest.SetValue(value) } tests := []struct { name string - metric func(time.Time) pdata.Metric + metric func(time.Time) pmetric.Metric wantSum float64 wantCount uint64 wantQuantiles map[float64]float64 @@ -473,10 +474,10 @@ func TestAccumulateSummary(t *testing.T) { 0.50: 190, 0.99: 817, }, - metric: func(ts time.Time) (metric pdata.Metric) { - metric = pdata.NewMetric() + metric: func(ts time.Time) (metric pmetric.Metric) { + metric = pmetric.NewMetric() metric.SetName("test_metric") - metric.SetDataType(pdata.MetricDataTypeSummary) + metric.SetDataType(pmetric.MetricDataTypeSummary) metric.SetDescription("test description") sp := metric.Summary().DataPoints().AppendEmpty() sp.SetCount(10) @@ -484,7 +485,7 @@ func TestAccumulateSummary(t *testing.T) { sp.SetCount(10) sp.Attributes().InsertString("label_1", "1") sp.Attributes().InsertString("label_2", "2") - sp.SetTimestamp(pdata.NewTimestampFromTime(ts)) + sp.SetTimestamp(pcommon.NewTimestampFromTime(ts)) fillQuantileValue(0.50, 190, sp.QuantileValues().AppendEmpty()) fillQuantileValue(0.99, 817, sp.QuantileValues().AppendEmpty()) @@ -505,8 +506,8 @@ func TestAccumulateSummary(t *testing.T) { metric := tt.metric(ts) c := collector{ accumulator: &mockAccumulator{ - []pdata.Metric{metric}, - pdata.NewMap(), + []pmetric.Metric{metric}, + pcommon.NewMap(), }, sendTimestamps: sendTimestamp, logger: zap.NewNop(), diff --git a/exporter/prometheusexporter/go.mod b/exporter/prometheusexporter/go.mod index 43375d7736ac..dcf0b123b30f 100644 --- a/exporter/prometheusexporter/go.mod +++ b/exporter/prometheusexporter/go.mod @@ -10,8 +10,8 @@ require ( github.com/prometheus/client_model v0.2.0 github.com/prometheus/prometheus v1.8.2-0.20220324155304-4d8bbfd4164c 
github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/zap v1.21.0 gopkg.in/yaml.v2 v2.4.0 ) @@ -32,7 +32,7 @@ require ( github.com/armon/go-metrics v0.3.10 // indirect github.com/aws/aws-sdk-go v1.43.32 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/cenkalti/backoff/v4 v4.1.2 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490 // indirect github.com/containerd/containerd v1.6.1 // indirect @@ -75,7 +75,7 @@ require ( github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b // indirect github.com/linode/linodego v1.3.0 // indirect github.com/mattn/go-colorable v0.1.12 // indirect @@ -98,9 +98,9 @@ require ( github.com/prometheus/procfs v0.7.3 // indirect github.com/scaleway/scaleway-sdk-go v1.0.0-beta.9 // indirect github.com/sirupsen/logrus v1.8.1 // indirect - github.com/spf13/cast v1.4.1 // indirect github.com/spf13/pflag v1.0.5 // indirect go.opencensus.io v0.23.0 // indirect + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect @@ -146,3 +146,5 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry => ../../pkg/resourcetotelemetry replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver => ../../receiver/prometheusreceiver + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/exporter/prometheusexporter/go.sum b/exporter/prometheusexporter/go.sum index b642121d4d4b..a8dc4eb6ed1e 100644 --- a/exporter/prometheusexporter/go.sum +++ b/exporter/prometheusexporter/go.sum @@ -51,7 +51,7 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -contrib.go.opencensus.io/exporter/prometheus v0.4.0 h1:0QfIkj9z/iVZgK31D9H9ohjjIDApI2GOPScCKwxedbs= +contrib.go.opencensus.io/exporter/prometheus v0.4.1 h1:oObVeKo2NxpdF/fIfrPsNj6K0Prg0R0mHM+uANlYMiM= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/AdaLogics/go-fuzz-headers v0.0.0-20210715213245-6c3934b029d8/go.mod h1:CzsSbkDixRphAF5hS6wbMKq0eI6ccJRb7/A0M6JBnwg= github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= @@ 
-187,8 +187,9 @@ github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8n github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= @@ -847,8 +848,8 @@ github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYs github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b h1:iNjcivnc6lhbvJA3LD622NPrUponluJrBWPIwGG/3Bg= github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -1165,8 +1166,6 @@ github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTd github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= @@ -1285,10 +1284,12 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod 
h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib v0.20.0 h1:ubFQUn0VCZ0gPwIoJfBJVpeBlyRMxu8Mm/huKWYd9p0= go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E= diff --git a/exporter/prometheusexporter/prometheus.go b/exporter/prometheusexporter/prometheus.go index 84bdf2411f23..a30bb9c5c8c4 100644 --- a/exporter/prometheusexporter/prometheus.go +++ b/exporter/prometheusexporter/prometheus.go @@ -24,7 +24,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" ) type prometheusExporter struct { @@ -81,7 +81,7 @@ func (pe *prometheusExporter) Start(_ context.Context, _ component.Host) error { return nil } -func (pe *prometheusExporter) ConsumeMetrics(_ context.Context, md pdata.Metrics) error { +func (pe *prometheusExporter) ConsumeMetrics(_ context.Context, md pmetric.Metrics) error { n := 0 rmetrics := md.ResourceMetrics() for i := 0; i < rmetrics.Len(); i++ { diff --git a/exporter/prometheusexporter/prometheus_test.go b/exporter/prometheusexporter/prometheus_test.go index dfeda8c8fb11..5769ec009341 100644 --- a/exporter/prometheusexporter/prometheus_test.go +++ b/exporter/prometheusexporter/prometheus_test.go @@ -27,7 +27,8 @@ import ( "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/config" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/testdata" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry" @@ -307,21 +308,21 @@ func TestPrometheusExporter_endToEndWithResource(t *testing.T) { } } -func metricBuilder(delta int64, prefix string) pdata.Metrics { - md := pdata.NewMetrics() +func metricBuilder(delta int64, prefix string) pmetric.Metrics { + md := pmetric.NewMetrics() ms := md.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() m1 := ms.AppendEmpty() 
m1.SetName(prefix + "this/one/there(where)") m1.SetDescription("Extra ones") m1.SetUnit("1") - m1.SetDataType(pdata.MetricDataTypeSum) + m1.SetDataType(pmetric.MetricDataTypeSum) d1 := m1.Sum() d1.SetIsMonotonic(true) - d1.SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + d1.SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) dp1 := d1.DataPoints().AppendEmpty() - dp1.SetStartTimestamp(pdata.NewTimestampFromTime(time.Unix(1543160298+delta, 100000090))) - dp1.SetTimestamp(pdata.NewTimestampFromTime(time.Unix(1543160298+delta, 100000997))) + dp1.SetStartTimestamp(pcommon.NewTimestampFromTime(time.Unix(1543160298+delta, 100000090))) + dp1.SetTimestamp(pcommon.NewTimestampFromTime(time.Unix(1543160298+delta, 100000997))) dp1.Attributes().UpsertString("os", "windows") dp1.Attributes().UpsertString("arch", "x86") dp1.SetIntVal(99 + delta) @@ -330,13 +331,13 @@ func metricBuilder(delta int64, prefix string) pdata.Metrics { m2.SetName(prefix + "this/one/there(where)") m2.SetDescription("Extra ones") m2.SetUnit("1") - m2.SetDataType(pdata.MetricDataTypeSum) + m2.SetDataType(pmetric.MetricDataTypeSum) d2 := m2.Sum() d2.SetIsMonotonic(true) - d2.SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + d2.SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) dp2 := d2.DataPoints().AppendEmpty() - dp2.SetStartTimestamp(pdata.NewTimestampFromTime(time.Unix(1543160298, 100000090))) - dp2.SetTimestamp(pdata.NewTimestampFromTime(time.Unix(1543160298, 100000997))) + dp2.SetStartTimestamp(pcommon.NewTimestampFromTime(time.Unix(1543160298, 100000090))) + dp2.SetTimestamp(pcommon.NewTimestampFromTime(time.Unix(1543160298, 100000997))) dp2.Attributes().UpsertString("os", "linux") dp2.Attributes().UpsertString("arch", "x86") dp2.SetIntVal(100 + delta) diff --git a/exporter/prometheusremotewriteexporter/exporter.go b/exporter/prometheusremotewriteexporter/exporter.go index c3aeca6f3aed..b989fa1a6e2c 100644 --- a/exporter/prometheusremotewriteexporter/exporter.go +++ b/exporter/prometheusremotewriteexporter/exporter.go @@ -33,7 +33,7 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/confighttp" "go.opentelemetry.io/collector/consumer/consumererror" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/multierr" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite" @@ -125,7 +125,7 @@ func (prwe *prwExporter) Shutdown(context.Context) error { // PushMetrics converts metrics to Prometheus remote write TimeSeries and send to remote endpoint. It maintain a map of // TimeSeries, validates and handles each individual metric, adding the converted TimeSeries to the map, and finally // exports the map. 
-func (prwe *prwExporter) PushMetrics(ctx context.Context, md pdata.Metrics) error { +func (prwe *prwExporter) PushMetrics(ctx context.Context, md pmetric.Metrics) error { prwe.wg.Add(1) defer prwe.wg.Done() diff --git a/exporter/prometheusremotewriteexporter/exporter_test.go b/exporter/prometheusremotewriteexporter/exporter_test.go index 0d14b3ed69f1..b718ca136264 100644 --- a/exporter/prometheusremotewriteexporter/exporter_test.go +++ b/exporter/prometheusremotewriteexporter/exporter_test.go @@ -35,7 +35,7 @@ import ( "go.opentelemetry.io/collector/config/confighttp" "go.opentelemetry.io/collector/config/configtls" "go.opentelemetry.io/collector/exporter/exporterhelper" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/testdata" ) @@ -226,7 +226,7 @@ func Test_Shutdown(t *testing.T) { wg.Add(1) go func() { defer wg.Done() - errChan <- prwe.PushMetrics(context.Background(), pdata.NewMetrics()) + errChan <- prwe.PushMetrics(context.Background(), pmetric.NewMetrics()) }() } wg.Wait() @@ -421,7 +421,7 @@ func Test_PushMetrics(t *testing.T) { tests := []struct { name string - md *pdata.Metrics + md *pmetric.Metrics reqTestFunc func(t *testing.T, r *http.Request, expected int, isStaleMarker bool) expectedTimeSeries int httpResponseCode int diff --git a/exporter/prometheusremotewriteexporter/go.mod b/exporter/prometheusremotewriteexporter/go.mod index 15417c37f430..5a04a30d9b3b 100644 --- a/exporter/prometheusremotewriteexporter/go.mod +++ b/exporter/prometheusremotewriteexporter/go.mod @@ -13,14 +13,14 @@ require ( github.com/prometheus/prometheus v1.8.2-0.20220117154355-4855a0c067e2 github.com/stretchr/testify v1.7.1 github.com/tidwall/wal v1.1.7 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/multierr v1.8.0 go.uber.org/zap v1.21.0 ) require ( - github.com/cenkalti/backoff/v4 v4.1.2 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/felixge/httpsnoop v1.0.2 // indirect github.com/go-logr/logr v1.2.3 // indirect @@ -28,28 +28,27 @@ require ( github.com/golang/protobuf v1.5.2 // indirect github.com/hashicorp/errwrap v1.0.0 // indirect github.com/klauspost/compress v1.15.1 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/common v0.33.0 // indirect github.com/rs/cors v1.8.2 // indirect - github.com/spf13/cast v1.4.1 // indirect github.com/tidwall/gjson v1.10.2 // indirect github.com/tidwall/match v1.1.1 // indirect github.com/tidwall/pretty v1.2.0 // indirect github.com/tidwall/tinylru v1.1.0 // indirect go.opencensus.io v0.23.0 // indirect + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 // indirect go.opentelemetry.io/otel v1.6.3 // 
indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect + golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 // indirect golang.org/x/text v0.3.7 // indirect - google.golang.org/genproto v0.0.0-20211223182754-3ac035c7e7cb // indirect google.golang.org/grpc v1.45.0 // indirect google.golang.org/protobuf v1.28.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect @@ -61,3 +60,5 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/corei replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry => ../../pkg/resourcetotelemetry replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite => ../../pkg/translator/prometheusremotewrite + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/exporter/prometheusremotewriteexporter/go.sum b/exporter/prometheusremotewriteexporter/go.sum index e19c10f85e4c..2c6886eb50bc 100644 --- a/exporter/prometheusremotewriteexporter/go.sum +++ b/exporter/prometheusremotewriteexporter/go.sum @@ -209,8 +209,8 @@ github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n github.com/cenkalti/backoff v0.0.0-20181003080854-62661b46c409/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= -github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM= @@ -715,7 +715,6 @@ github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= @@ -864,8 +863,8 @@ github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47e github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= 
github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -1190,8 +1189,6 @@ github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTd github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= @@ -1319,10 +1316,12 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 
h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0/go.mod h1:PFmBsWbldL1kiWZk9+0LBZz2brhByaGsvp6pRICMlPE= @@ -1333,7 +1332,7 @@ go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1 go.opentelemetry.io/otel/metric v0.28.0/go.mod h1:TrzsfQAmQaB1PDcdhBauLMk7nyyg9hm+GoQq/ekE9Iw= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.0/go.mod h1:qs7BrU5cZ8dXQHBGxHMOxwME/27YH2qEp4/+tZLLwJE= go.opentelemetry.io/otel/trace v1.6.1/go.mod h1:RkFRM1m0puWIq10oxImnGEduNBzxiN7TXluRBtE+5j0= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= @@ -1658,8 +1657,9 @@ golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= diff --git a/exporter/prometheusremotewriteexporter/testutil_test.go b/exporter/prometheusremotewriteexporter/testutil_test.go index 50d17982a341..64961e36f65e 100644 --- a/exporter/prometheusremotewriteexporter/testutil_test.go +++ b/exporter/prometheusremotewriteexporter/testutil_test.go @@ -20,7 +20,8 @@ import ( "time" "github.com/prometheus/prometheus/prompb" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" ) var ( @@ -68,7 +69,7 @@ var ( unmatchedBoundBucketHist = "unmatchedBoundBucketHist" // valid metrics as input should not return error - validMetrics1 = map[string]pdata.Metric{ + validMetrics1 = map[string]pmetric.Metric{ validIntGauge: getIntGaugeMetric(validIntGauge, lbs1, intVal1, time1), validDoubleGauge: getDoubleGaugeMetric(validDoubleGauge, lbs1, floatVal1, time1), validIntSum: getIntSumMetric(validIntSum, lbs1, intVal1, time1), @@ -77,7 +78,7 @@ var ( validHistogram: getHistogramMetric(validHistogram, lbs1, time1, floatVal1, uint64(intVal1), bounds, buckets), validSummary: getSummaryMetric(validSummary, lbs1, time1, floatVal1, uint64(intVal1), quantiles), } - validMetrics2 = map[string]pdata.Metric{ + validMetrics2 = map[string]pmetric.Metric{ validIntGauge: getIntGaugeMetric(validIntGauge, lbs2, intVal2, time2), validDoubleGauge: 
getDoubleGaugeMetric(validDoubleGauge, lbs2, floatVal2, time2), validIntSum: getIntSumMetric(validIntSum, lbs2, intVal2, time2), @@ -85,7 +86,7 @@ var ( validHistogram: getHistogramMetric(validHistogram, lbs2, time2, floatVal2, uint64(intVal2), bounds, buckets), validSummary: getSummaryMetric(validSummary, lbs2, time2, floatVal2, uint64(intVal2), quantiles), validIntGaugeDirty: getIntGaugeMetric(validIntGaugeDirty, lbs1, intVal1, time1), - unmatchedBoundBucketHist: getHistogramMetric(unmatchedBoundBucketHist, pdata.NewMap(), 0, 0, 0, []float64{0.1, 0.2, 0.3}, []uint64{1, 2}), + unmatchedBoundBucketHist: getHistogramMetric(unmatchedBoundBucketHist, pcommon.NewMap(), 0, 0, 0, []float64{0.1, 0.2, 0.3}, []uint64{1, 2}), } empty = "empty" @@ -101,8 +102,8 @@ var ( emptyCumulativeHistogram = "emptyCumulativeHistogram" // different metrics that will not pass validate metrics and will cause the exporter to return an error - invalidMetrics = map[string]pdata.Metric{ - empty: pdata.NewMetric(), + invalidMetrics = map[string]pmetric.Metric{ + empty: pmetric.NewMetric(), emptyGauge: getEmptyGaugeMetric(emptyGauge), emptySum: getEmptySumMetric(emptySum), emptyHistogram: getEmptyHistogramMetric(emptyHistogram), @@ -119,7 +120,7 @@ var ( staleNaNSummary = "staleNaNSummary" // staleNaN metrics as input should have the staleness marker flag - staleNaNMetrics = map[string]pdata.Metric{ + staleNaNMetrics = map[string]pmetric.Metric{ staleNaNIntGauge: getIntGaugeMetric(staleNaNIntGauge, lbs1, intVal1, time1), staleNaNDoubleGauge: getDoubleGaugeMetric(staleNaNDoubleGauge, lbs1, floatVal1, time1), staleNaNIntSum: getIntSumMetric(staleNaNIntSum, lbs1, intVal1, time1), @@ -132,8 +133,8 @@ var ( // OTLP metrics // attributes must come in pairs -func getAttributes(labels ...string) pdata.Map { - attributeMap := pdata.NewMap() +func getAttributes(labels ...string) pcommon.Map { + attributeMap := pcommon.NewMap() for i := 0; i < len(labels); i += 2 { attributeMap.UpsertString(labels[i], labels[i+1]) } @@ -172,8 +173,8 @@ func getTimeSeries(labels []prompb.Label, samples ...prompb.Sample) *prompb.Time } } -func getMetricsFromMetricList(metricList ...pdata.Metric) pdata.Metrics { - metrics := pdata.NewMetrics() +func getMetricsFromMetricList(metricList ...pmetric.Metric) pmetric.Metrics { + metrics := pmetric.NewMetrics() rm := metrics.ResourceMetrics().AppendEmpty() ilm := rm.ScopeMetrics().AppendEmpty() @@ -185,17 +186,17 @@ func getMetricsFromMetricList(metricList ...pdata.Metric) pdata.Metrics { return metrics } -func getEmptyGaugeMetric(name string) pdata.Metric { - metric := pdata.NewMetric() +func getEmptyGaugeMetric(name string) pmetric.Metric { + metric := pmetric.NewMetric() metric.SetName(name) - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) return metric } -func getIntGaugeMetric(name string, attributes pdata.Map, value int64, ts uint64) pdata.Metric { - metric := pdata.NewMetric() +func getIntGaugeMetric(name string, attributes pcommon.Map, value int64, ts uint64) pmetric.Metric { + metric := pmetric.NewMetric() metric.SetName(name) - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) dp := metric.Gauge().DataPoints().AppendEmpty() if strings.HasPrefix(name, "staleNaN") { dp.SetFlags(1) @@ -203,15 +204,15 @@ func getIntGaugeMetric(name string, attributes pdata.Map, value int64, ts uint64 dp.SetIntVal(value) attributes.CopyTo(dp.Attributes()) - dp.SetStartTimestamp(pdata.Timestamp(0)) - 
dp.SetTimestamp(pdata.Timestamp(ts)) + dp.SetStartTimestamp(pcommon.Timestamp(0)) + dp.SetTimestamp(pcommon.Timestamp(ts)) return metric } -func getDoubleGaugeMetric(name string, attributes pdata.Map, value float64, ts uint64) pdata.Metric { - metric := pdata.NewMetric() +func getDoubleGaugeMetric(name string, attributes pcommon.Map, value float64, ts uint64) pmetric.Metric { + metric := pmetric.NewMetric() metric.SetName(name) - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) dp := metric.Gauge().DataPoints().AppendEmpty() if strings.HasPrefix(name, "staleNaN") { dp.SetFlags(1) @@ -219,23 +220,23 @@ func getDoubleGaugeMetric(name string, attributes pdata.Map, value float64, ts u dp.SetDoubleVal(value) attributes.CopyTo(dp.Attributes()) - dp.SetStartTimestamp(pdata.Timestamp(0)) - dp.SetTimestamp(pdata.Timestamp(ts)) + dp.SetStartTimestamp(pcommon.Timestamp(0)) + dp.SetTimestamp(pcommon.Timestamp(ts)) return metric } -func getEmptySumMetric(name string) pdata.Metric { - metric := pdata.NewMetric() +func getEmptySumMetric(name string) pmetric.Metric { + metric := pmetric.NewMetric() metric.SetName(name) - metric.SetDataType(pdata.MetricDataTypeSum) + metric.SetDataType(pmetric.MetricDataTypeSum) return metric } -func getIntSumMetric(name string, attributes pdata.Map, value int64, ts uint64) pdata.Metric { - metric := pdata.NewMetric() +func getIntSumMetric(name string, attributes pcommon.Map, value int64, ts uint64) pmetric.Metric { + metric := pmetric.NewMetric() metric.SetName(name) - metric.SetDataType(pdata.MetricDataTypeSum) - metric.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + metric.SetDataType(pmetric.MetricDataTypeSum) + metric.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) dp := metric.Sum().DataPoints().AppendEmpty() if strings.HasPrefix(name, "staleNaN") { dp.SetFlags(1) @@ -243,24 +244,24 @@ func getIntSumMetric(name string, attributes pdata.Map, value int64, ts uint64) dp.SetIntVal(value) attributes.CopyTo(dp.Attributes()) - dp.SetStartTimestamp(pdata.Timestamp(0)) - dp.SetTimestamp(pdata.Timestamp(ts)) + dp.SetStartTimestamp(pcommon.Timestamp(0)) + dp.SetTimestamp(pcommon.Timestamp(ts)) return metric } -func getEmptyCumulativeSumMetric(name string) pdata.Metric { - metric := pdata.NewMetric() +func getEmptyCumulativeSumMetric(name string) pmetric.Metric { + metric := pmetric.NewMetric() metric.SetName(name) - metric.SetDataType(pdata.MetricDataTypeSum) - metric.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + metric.SetDataType(pmetric.MetricDataTypeSum) + metric.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) return metric } -func getSumMetric(name string, attributes pdata.Map, value float64, ts uint64) pdata.Metric { - metric := pdata.NewMetric() +func getSumMetric(name string, attributes pcommon.Map, value float64, ts uint64) pmetric.Metric { + metric := pmetric.NewMetric() metric.SetName(name) - metric.SetDataType(pdata.MetricDataTypeSum) - metric.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + metric.SetDataType(pmetric.MetricDataTypeSum) + metric.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) dp := metric.Sum().DataPoints().AppendEmpty() if strings.HasPrefix(name, "staleNaN") { dp.SetFlags(1) @@ -268,31 +269,31 @@ func getSumMetric(name string, attributes pdata.Map, value float64, ts uint64) p dp.SetDoubleVal(value) 
attributes.CopyTo(dp.Attributes()) - dp.SetStartTimestamp(pdata.Timestamp(0)) - dp.SetTimestamp(pdata.Timestamp(ts)) + dp.SetStartTimestamp(pcommon.Timestamp(0)) + dp.SetTimestamp(pcommon.Timestamp(ts)) return metric } -func getEmptyHistogramMetric(name string) pdata.Metric { - metric := pdata.NewMetric() +func getEmptyHistogramMetric(name string) pmetric.Metric { + metric := pmetric.NewMetric() metric.SetName(name) - metric.SetDataType(pdata.MetricDataTypeHistogram) + metric.SetDataType(pmetric.MetricDataTypeHistogram) return metric } -func getEmptyCumulativeHistogramMetric(name string) pdata.Metric { - metric := pdata.NewMetric() +func getEmptyCumulativeHistogramMetric(name string) pmetric.Metric { + metric := pmetric.NewMetric() metric.SetName(name) - metric.SetDataType(pdata.MetricDataTypeHistogram) - metric.Histogram().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + metric.SetDataType(pmetric.MetricDataTypeHistogram) + metric.Histogram().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) return metric } -func getHistogramMetric(name string, attributes pdata.Map, ts uint64, sum float64, count uint64, bounds []float64, buckets []uint64) pdata.Metric { - metric := pdata.NewMetric() +func getHistogramMetric(name string, attributes pcommon.Map, ts uint64, sum float64, count uint64, bounds []float64, buckets []uint64) pmetric.Metric { + metric := pmetric.NewMetric() metric.SetName(name) - metric.SetDataType(pdata.MetricDataTypeHistogram) - metric.Histogram().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + metric.SetDataType(pmetric.MetricDataTypeHistogram) + metric.Histogram().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) dp := metric.Histogram().DataPoints().AppendEmpty() if strings.HasPrefix(name, "staleNaN") { dp.SetFlags(1) @@ -303,33 +304,33 @@ func getHistogramMetric(name string, attributes pdata.Map, ts uint64, sum float6 dp.SetExplicitBounds(bounds) attributes.CopyTo(dp.Attributes()) - dp.SetTimestamp(pdata.Timestamp(ts)) + dp.SetTimestamp(pcommon.Timestamp(ts)) return metric } -func getEmptySummaryMetric(name string) pdata.Metric { - metric := pdata.NewMetric() +func getEmptySummaryMetric(name string) pmetric.Metric { + metric := pmetric.NewMetric() metric.SetName(name) - metric.SetDataType(pdata.MetricDataTypeSummary) + metric.SetDataType(pmetric.MetricDataTypeSummary) return metric } -func getSummaryMetric(name string, attributes pdata.Map, ts uint64, sum float64, count uint64, quantiles pdata.ValueAtQuantileSlice) pdata.Metric { - metric := pdata.NewMetric() +func getSummaryMetric(name string, attributes pcommon.Map, ts uint64, sum float64, count uint64, quantiles pmetric.ValueAtQuantileSlice) pmetric.Metric { + metric := pmetric.NewMetric() metric.SetName(name) - metric.SetDataType(pdata.MetricDataTypeSummary) + metric.SetDataType(pmetric.MetricDataTypeSummary) dp := metric.Summary().DataPoints().AppendEmpty() if strings.HasPrefix(name, "staleNaN") { dp.SetFlags(1) } dp.SetCount(count) dp.SetSum(sum) - attributes.Range(func(k string, v pdata.Value) bool { + attributes.Range(func(k string, v pcommon.Value) bool { dp.Attributes().Upsert(k, v) return true }) - dp.SetTimestamp(pdata.Timestamp(ts)) + dp.SetTimestamp(pcommon.Timestamp(ts)) quantiles.CopyTo(dp.QuantileValues()) quantiles.At(0).Quantile() @@ -337,8 +338,8 @@ func getSummaryMetric(name string, attributes pdata.Map, ts uint64, sum float64, return metric } -func getQuantiles(bounds []float64, values []float64) 
pdata.ValueAtQuantileSlice { - quantiles := pdata.NewValueAtQuantileSlice() +func getQuantiles(bounds []float64, values []float64) pmetric.ValueAtQuantileSlice { + quantiles := pmetric.NewValueAtQuantileSlice() quantiles.EnsureCapacity(len(bounds)) for i := 0; i < len(bounds); i++ { diff --git a/exporter/sapmexporter/exporter.go b/exporter/sapmexporter/exporter.go index 8fc1678ffa50..8c5c23cdde9b 100644 --- a/exporter/sapmexporter/exporter.go +++ b/exporter/sapmexporter/exporter.go @@ -24,7 +24,7 @@ import ( "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumererror" "go.opentelemetry.io/collector/exporter/exporterhelper" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk" @@ -101,13 +101,13 @@ func newSAPMTracesExporter(cfg *Config, set component.ExporterCreateSettings) (c // pushTraceData exports traces in SAPM proto by associated SFx access token and returns number of dropped spans // and the last experienced error if any translation or export failed -func (se *sapmExporter) pushTraceData(ctx context.Context, td pdata.Traces) error { +func (se *sapmExporter) pushTraceData(ctx context.Context, td ptrace.Traces) error { rss := td.ResourceSpans() if rss.Len() == 0 { return nil } - // All metrics in the pdata.Metrics will have the same access token because of the BatchPerResourceMetrics. + // All metrics in the pmetric.Metrics will have the same access token because of the BatchPerResourceMetrics. accessToken := se.retrieveAccessToken(rss.At(0)) batches, err := jaeger.ProtoFromTraces(td) if err != nil { @@ -129,7 +129,7 @@ func (se *sapmExporter) pushTraceData(ctx context.Context, td pdata.Traces) erro return nil } -func (se *sapmExporter) retrieveAccessToken(md pdata.ResourceSpans) string { +func (se *sapmExporter) retrieveAccessToken(md ptrace.ResourceSpans) string { if !se.config.AccessTokenPassthrough { // Nothing to do if token is pass through not configured or resource is nil. 
return "" diff --git a/exporter/sapmexporter/exporter_test.go b/exporter/sapmexporter/exporter_test.go index 7de1a3812dd0..3b74284b7074 100644 --- a/exporter/sapmexporter/exporter_test.go +++ b/exporter/sapmexporter/exporter_test.go @@ -28,7 +28,8 @@ import ( "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/config" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger" @@ -63,8 +64,8 @@ func TestCreateTracesExporterWithInvalidConfig(t *testing.T) { assert.Nil(t, te) } -func buildTestTraces(setTokenLabel bool) (traces pdata.Traces) { - traces = pdata.NewTraces() +func buildTestTraces(setTokenLabel bool) (traces ptrace.Traces) { + traces = ptrace.NewTraces() rss := traces.ResourceSpans() rss.EnsureCapacity(20) @@ -85,8 +86,8 @@ func buildTestTraces(setTokenLabel bool) (traces pdata.Traces) { span := rs.ScopeSpans().AppendEmpty().Spans().AppendEmpty() name := fmt.Sprintf("Span%d", i) span.SetName(name) - span.SetTraceID(pdata.NewTraceID([16]byte{1})) - span.SetSpanID(pdata.NewSpanID([8]byte{1})) + span.SetTraceID(pcommon.NewTraceID([16]byte{1})) + span.SetSpanID(pcommon.NewSpanID([8]byte{1})) } return traces @@ -133,8 +134,8 @@ func hasToken(batches []*model.Batch) bool { return false } -func buildTestTrace() pdata.Traces { - trace := pdata.NewTraces() +func buildTestTrace() ptrace.Traces { + trace := ptrace.NewTraces() trace.ResourceSpans().EnsureCapacity(2) for i := 0; i < 2; i++ { rs := trace.ResourceSpans().AppendEmpty() @@ -148,8 +149,8 @@ func buildTestTrace() pdata.Traces { var spanIDBytes [8]byte rand.Read(traceIDBytes[:]) rand.Read(spanIDBytes[:]) - span.SetTraceID(pdata.NewTraceID(traceIDBytes)) - span.SetSpanID(pdata.NewSpanID(spanIDBytes)) + span.SetTraceID(pcommon.NewTraceID(traceIDBytes)) + span.SetSpanID(pcommon.NewSpanID(spanIDBytes)) } return trace } diff --git a/exporter/sapmexporter/go.mod b/exporter/sapmexporter/go.mod index 2da5d36a0bcb..939aa023fe4b 100644 --- a/exporter/sapmexporter/go.mod +++ b/exporter/sapmexporter/go.mod @@ -9,42 +9,37 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.48.0 github.com/signalfx/sapm-proto v0.9.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/zap v1.21.0 ) require ( github.com/apache/thrift v0.16.0 // indirect - github.com/cenkalti/backoff/v4 v4.1.2 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/felixge/httpsnoop v1.0.2 // indirect github.com/go-logr/logr v1.2.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.2 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect 
github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.48.0 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spf13/cast v1.4.1 // indirect github.com/uber/jaeger-client-go v2.30.0+incompatible // indirect github.com/uber/jaeger-lib v2.4.1+incompatible // indirect go.opencensus.io v0.23.0 // indirect + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect - golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect - golang.org/x/text v0.3.7 // indirect - google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect - google.golang.org/grpc v1.45.0 // indirect google.golang.org/protobuf v1.28.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect @@ -57,3 +52,5 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperre replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger => ../../pkg/translator/jaeger replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal => ../../internal/coreinternal + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/exporter/sapmexporter/go.sum b/exporter/sapmexporter/go.sum index e1dcbc29217f..c0d30106fb20 100644 --- a/exporter/sapmexporter/go.sum +++ b/exporter/sapmexporter/go.sum @@ -46,7 +46,7 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= code.cloudfoundry.org/bytefmt v0.0.0-20190710193110-1eb035ffe2b6/go.mod h1:wN/zk7mhREp/oviagqUXY3EwuHhWyOvAdsn5Y4CzOrc= -contrib.go.opencensus.io/exporter/prometheus v0.4.0/go.mod h1:o7cosnyfuPVK0tB8q0QmaQNhGnptITnPQB+z1+qeFB0= +contrib.go.opencensus.io/exporter/prometheus v0.4.1/go.mod h1:t9wvfitlUjGXG2IXAZsuFq26mDGid/JwCEXp+gTG/9U= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= @@ -117,8 +117,8 @@ github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dR github.com/bsm/sarama-cluster v2.1.13+incompatible/go.mod h1:r7ao+4tTNXvWm+VRpRJchr2kQhqxgmAp2iEX5W96gMM= github.com/casbin/casbin/v2 v2.31.6/go.mod h1:vByNa/Fchek0KZUgG5wEsl7iFsiviAYKRtgrQfcJqHg= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= -github.com/cenkalti/backoff/v4 v4.1.2/go.mod 
h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= @@ -208,10 +208,12 @@ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2 github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.11.0/go.mod h1:73/6Ixaufkvb5Osvkls8C79vuQ49Ba1rUEUYNSf+FUw= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -437,7 +439,6 @@ github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORR github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= @@ -542,8 +543,8 @@ github.com/klauspost/compress v1.14.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47e github.com/klauspost/compress v1.14.4/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= 
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -688,6 +689,7 @@ github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5Fsn github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -699,6 +701,7 @@ github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB8 github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.28.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.33.0/go.mod h1:gB3sOl7P0TvJabZpLY5uQMpUqRCPPCyRLCZYc7JZTNE= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= @@ -732,7 +735,7 @@ github.com/sanity-io/litter v1.2.0/go.mod h1:JF6pZUFgu2Q0sBZ+HSV35P8TVPI1TTzEwyu github.com/schollz/progressbar/v2 v2.13.2/go.mod h1:6YZjqdthH6SCZKv2rqGryrxPtfmRB/DWZxSMfCXPyD8= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/shirou/gopsutil/v3 v3.22.2/go.mod h1:WapW1AOOPlHyXr+yOyw3uYx36enocrtSoSBy0L5vUHY= +github.com/shirou/gopsutil/v3 v3.22.3/go.mod h1:D01hZJ4pVHPpCTZ3m3T2+wDF2YAGfd+H4ifUguaQzHM= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/signalfx/sapm-proto v0.9.0 h1:x4EfhzOZtBGyt2x8gc/C23Id9B+3lf1zE59VUWLKbpQ= github.com/signalfx/sapm-proto v0.9.0/go.mod h1:OmhyyGyhBzoKQn6G2wM1vpEsGKGo0lym/kj0G41KqZk= @@ -752,7 +755,6 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.3/go.mod 
h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= @@ -784,8 +786,8 @@ github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMT github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tklauser/go-sysconf v0.3.9/go.mod h1:11DU/5sG7UexIrp/O6g35hrWzu0JxlwQ3LSFUzyeuhs= -github.com/tklauser/numcpus v0.3.0/go.mod h1:yFGUr7TUHQRAhyqBcEg0Ge34zDBAsIvJJcyE6boqnA8= +github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk= +github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o= github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= @@ -836,13 +838,16 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= go.opentelemetry.io/collector/model v0.44.0/go.mod h1:4jo1R8uBDspLCxUGhQ0k3v/EFXFbW7s0AIy3LuGLbcU= go.opentelemetry.io/collector/model v0.45.0/go.mod h1:uyiyyq8lV45zrJ94MnLip26sorfNLP6J9XmOvaEmy7w= go.opentelemetry.io/collector/model v0.46.0/go.mod h1:uyiyyq8lV45zrJ94MnLip26sorfNLP6J9XmOvaEmy7w= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0/go.mod h1:SY9qHHUES6W3oZnO1H2W8NvsSovIoXRg/A1AH9px8+I= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.29.0/go.mod h1:tLYsuf2v8fZreBVwp9gVMhefZlLFZaUiNVSq8QxXRII= @@ -855,16 +860,16 @@ go.opentelemetry.io/otel v1.6.0/go.mod h1:bfJD2DZVw0LBxghOTlgnlI0CV3hLDu9XF/QKOU 
go.opentelemetry.io/otel v1.6.1/go.mod h1:blzUabWHkX6LJewxvadmzafgh/wnvBSDBdOuwkAtrWQ= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= -go.opentelemetry.io/otel/exporters/prometheus v0.28.0/go.mod h1:nN2uGmk/rLmcbPTaZakIMqYH2Q0T8V1sOnKOHe/HLH0= +go.opentelemetry.io/otel/exporters/prometheus v0.29.0/go.mod h1:Er2VVJQZbHysogooLNchdZ3MLYoI7+d15mHmrRlRJCU= go.opentelemetry.io/otel/internal/metric v0.27.0/go.mod h1:n1CVxRqKqYZtqyTh9U/onvKapPGv7y/rpyOTI+LFNzw= go.opentelemetry.io/otel/metric v0.27.0/go.mod h1:raXDJ7uP2/Jc0nVZWQjJtzoyssOYWu/+pjZqRzfvZ7g= go.opentelemetry.io/otel/metric v0.28.0/go.mod h1:TrzsfQAmQaB1PDcdhBauLMk7nyyg9hm+GoQq/ekE9Iw= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.0/go.mod h1:PjLRUfDsoPy0zl7yrDGSUqjj43tL7rEtFdCEiGlxXRM= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= go.opentelemetry.io/otel/sdk v1.6.1/go.mod h1:IVYrddmFZ+eJqu2k38qD3WezFR2pymCzm8tdxyh3R4E= -go.opentelemetry.io/otel/sdk/metric v0.28.0/go.mod h1:DqJmT0ovBgoW6TJ8CAQyTnwxZPIp3KWtCiDDZ1uHAzU= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= +go.opentelemetry.io/otel/sdk v1.6.3/go.mod h1:A4iWF7HTXa+GWL/AaqESz28VuSBIcZ+0CV+IzJ5NMiQ= +go.opentelemetry.io/otel/sdk/metric v0.29.0/go.mod h1:IFkFNKI8Gq8zBdqOKdODCL9+LInBZLXaGpqSIKphNuU= go.opentelemetry.io/otel/trace v1.4.0/go.mod h1:uc3eRsqDfWs9R7b92xbQbU42/eTNz4N+gLP8qJCi4aE= go.opentelemetry.io/otel/trace v1.4.1/go.mod h1:iYEVbroFCNut9QkwEczV9vMRPHNKSSwYZjulEtsmhFc= go.opentelemetry.io/otel/trace v1.6.0/go.mod h1:qs7BrU5cZ8dXQHBGxHMOxwME/27YH2qEp4/+tZLLwJE= @@ -1006,8 +1011,9 @@ golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220105145211-5b0dc2dfae98/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1025,6 +1031,7 @@ golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod 
h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1119,7 +1126,6 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1129,9 +1135,9 @@ golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220111092808-5a964db01320/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= diff --git a/exporter/sentryexporter/go.mod b/exporter/sentryexporter/go.mod index 12d5bb58c3e9..01cb3a4395e6 100644 --- a/exporter/sentryexporter/go.mod +++ b/exporter/sentryexporter/go.mod @@ -6,17 +6,17 @@ require ( github.com/getsentry/sentry-go v0.13.0 github.com/google/go-cmp v0.5.7 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 ) require ( - github.com/cenkalti/backoff/v4 v4.1.2 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/fsnotify/fsnotify v1.5.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/protobuf v1.5.2 // indirect - github.com/knadh/koanf v1.4.0 // indirect 
+ github.com/knadh/koanf v1.4.1 // indirect github.com/kr/pretty v0.3.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect @@ -24,7 +24,6 @@ require ( github.com/pelletier/go-toml v1.9.4 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect @@ -32,14 +31,11 @@ require ( go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect go.uber.org/zap v1.21.0 // indirect - golang.org/x/net v0.0.0-20211008194852-3b03d305991f // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect - golang.org/x/text v0.3.7 // indirect + golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 // indirect golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect - google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect - google.golang.org/grpc v1.45.0 // indirect - google.golang.org/protobuf v1.28.0 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/exporter/sentryexporter/go.sum b/exporter/sentryexporter/go.sum index 516fa623bc9e..d2618f9287e3 100644 --- a/exporter/sentryexporter/go.sum +++ b/exporter/sentryexporter/go.sum @@ -1,8 +1,5 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= @@ -18,19 +15,11 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= -github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= 
-github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -38,9 +27,6 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= @@ -50,7 +36,6 @@ github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWp github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= github.com/getsentry/sentry-go v0.13.0 h1:20dgTiUSfxRB/EhMPtxcL9ZEbM1ZdR+W/7f7NWD+xWo= github.com/getsentry/sentry-go v0.13.0/go.mod h1:EOsfu5ZdvKPfeHYV6pTVQnsjfp30+XA7//UooKNumH0= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w= github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -68,18 +53,14 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf 
v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -89,13 +70,10 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -125,8 +103,8 @@ github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqx github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= 
-github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= @@ -169,21 +147,16 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rhnvrm/simples3 v0.6.1/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= @@ -193,20 +166,21 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d 
h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -230,20 +204,16 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20211008194852-3b03d305991f h1:1scJEYZBaF48BaG6tYbtxmLcXqwYGSfGcMoStTqkkIw= -golang.org/x/net 
v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -259,22 +229,19 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -295,22 +262,16 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto 
v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -320,11 +281,7 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -332,8 +289,6 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/square/go-jose.v2 
v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= diff --git a/exporter/sentryexporter/sentry_exporter.go b/exporter/sentryexporter/sentry_exporter.go index f8d6d445b9b0..79964a6647d1 100644 --- a/exporter/sentryexporter/sentry_exporter.go +++ b/exporter/sentryexporter/sentry_exporter.go @@ -28,8 +28,9 @@ import ( "github.com/getsentry/sentry-go" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/exporter/exporterhelper" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" ) const ( @@ -52,7 +53,7 @@ type SentryExporter struct { // pushTraceData takes an incoming OpenTelemetry trace, converts them into Sentry spans and transactions // and sends them using Sentry's transport. -func (s *SentryExporter) pushTraceData(_ context.Context, td pdata.Traces) error { +func (s *SentryExporter) pushTraceData(_ context.Context, td ptrace.Traces) error { var exceptionEvents []*sentry.Event resourceSpans := td.ResourceSpans() if resourceSpans.Len() == 0 { @@ -136,14 +137,14 @@ func generateTransactions(transactionMap map[sentry.SpanID]*sentry.Event, orphan // convertEventsToSentryExceptions creates a set of sentry events from exception events present in spans. // These events are stored in a mutated eventList -func convertEventsToSentryExceptions(eventList *[]*sentry.Event, events pdata.SpanEventSlice, sentrySpan *sentry.Span) { +func convertEventsToSentryExceptions(eventList *[]*sentry.Event, events ptrace.SpanEventSlice, sentrySpan *sentry.Span) { for i := 0; i < events.Len(); i++ { event := events.At(i) if event.Name() != "exception" { continue } var exceptionMessage, exceptionType string - event.Attributes().Range(func(k string, v pdata.Value) bool { + event.Attributes().Range(func(k string, v pcommon.Value) bool { switch k { case conventions.AttributeExceptionMessage: exceptionMessage = v.StringVal() @@ -222,7 +223,7 @@ func classifyAsOrphanSpans(orphanSpans []*sentry.Span, prevLength int, idMap map return classifyAsOrphanSpans(newOrphanSpans, len(orphanSpans), idMap, transactionMap) } -func convertToSentrySpan(span pdata.Span, library pdata.InstrumentationScope, resourceTags map[string]string) (sentrySpan *sentry.Span) { +func convertToSentrySpan(span ptrace.Span, library pcommon.InstrumentationScope, resourceTags map[string]string) (sentrySpan *sentry.Span) { attributes := span.Attributes() name := span.Name() spanKind := span.Kind() @@ -240,7 +241,7 @@ func convertToSentrySpan(span pdata.Span, library pdata.InstrumentationScope, re tags["status_message"] = message } - if spanKind != pdata.SpanKindUnspecified { + if spanKind != ptrace.SpanKindUnspecified { tags["span_kind"] = spanKind.String() } @@ -271,7 +272,7 @@ func convertToSentrySpan(span pdata.Span, library pdata.InstrumentationScope, re // // See https://github.com/open-telemetry/opentelemetry-specification/tree/5b78ee1/specification/trace/semantic_conventions // for more details about the semantic conventions. 
-func generateSpanDescriptors(name string, attrs pdata.Map, spanKind pdata.SpanKind) (op string, description string) { +func generateSpanDescriptors(name string, attrs pcommon.Map, spanKind ptrace.SpanKind) (op string, description string) { var opBuilder strings.Builder var dBuilder strings.Builder @@ -284,9 +285,9 @@ func generateSpanDescriptors(name string, attrs pdata.Map, spanKind pdata.SpanKi opBuilder.WriteString("http") switch spanKind { - case pdata.SpanKindClient: + case ptrace.SpanKindClient: opBuilder.WriteString(".client") - case pdata.SpanKindServer: + case ptrace.SpanKindServer: opBuilder.WriteString(".server") } @@ -335,22 +336,22 @@ func generateSpanDescriptors(name string, attrs pdata.Map, spanKind pdata.SpanKi return "", name } -func generateTagsFromResource(resource pdata.Resource) map[string]string { +func generateTagsFromResource(resource pcommon.Resource) map[string]string { return generateTagsFromAttributes(resource.Attributes()) } -func generateTagsFromAttributes(attrs pdata.Map) map[string]string { +func generateTagsFromAttributes(attrs pcommon.Map) map[string]string { tags := make(map[string]string) - attrs.Range(func(key string, attr pdata.Value) bool { + attrs.Range(func(key string, attr pcommon.Value) bool { switch attr.Type() { - case pdata.ValueTypeString: + case pcommon.ValueTypeString: tags[key] = attr.StringVal() - case pdata.ValueTypeBool: + case pcommon.ValueTypeBool: tags[key] = strconv.FormatBool(attr.BoolVal()) - case pdata.ValueTypeDouble: + case pcommon.ValueTypeDouble: tags[key] = strconv.FormatFloat(attr.DoubleVal(), 'g', -1, 64) - case pdata.ValueTypeInt: + case pcommon.ValueTypeInt: tags[key] = strconv.FormatInt(attr.IntVal(), 10) } return true @@ -359,7 +360,7 @@ func generateTagsFromAttributes(attrs pdata.Map) map[string]string { return tags } -func statusFromSpanStatus(spanStatus pdata.SpanStatus) (status sentry.SpanStatus, message string) { +func statusFromSpanStatus(spanStatus ptrace.SpanStatus) (status sentry.SpanStatus, message string) { code := spanStatus.Code() if code < 0 || int(code) >= len(canonicalCodes) { return sentry.SpanStatusUnknown, fmt.Sprintf("error code %d", code) @@ -370,9 +371,9 @@ func statusFromSpanStatus(spanStatus pdata.SpanStatus) (status sentry.SpanStatus // spanIsTransaction determines if a span should be sent to Sentry as a transaction. // If parent span id is empty or the span kind allows remote parent spans, then the span is a root span. -func spanIsTransaction(s pdata.Span) bool { +func spanIsTransaction(s ptrace.Span) bool { kind := s.Kind() - return s.ParentSpanID() == pdata.SpanID{} || kind == pdata.SpanKindServer || kind == pdata.SpanKindConsumer + return s.ParentSpanID() == pcommon.SpanID{} || kind == ptrace.SpanKindServer || kind == ptrace.SpanKindConsumer } // transactionFromSpan converts a span to a transaction. 
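A minimal standalone sketch (not part of the patch) of the pdata move this diff applies: types from the old go.opentelemetry.io/collector/model/pdata package are replaced by ptrace (trace signal) and pcommon (shared IDs, values, timestamps) from the new go.opentelemetry.io/collector/pdata module pinned above. The span name, attribute key, and printed output are illustrative only; the calls mirror the ones used in the hunks above.

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/ptrace"
)

func main() {
	// ptrace replaces pdata for the trace signal: Traces, Span, SpanKind.
	traces := ptrace.NewTraces()
	span := traces.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty().Spans().AppendEmpty()
	span.SetName("example-span") // illustrative name
	span.SetKind(ptrace.SpanKindClient)

	// pcommon replaces pdata for shared types: SpanID, Value, Map, Timestamp.
	span.SetSpanID(pcommon.NewSpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8}))
	span.Attributes().InsertString("http.method", "GET")

	// Range now passes pcommon.Value instead of pdata.Value.
	span.Attributes().Range(func(k string, v pcommon.Value) bool {
		fmt.Println(k, "=", v.StringVal())
		return true
	})
}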
diff --git a/exporter/sentryexporter/sentry_exporter_test.go b/exporter/sentryexporter/sentry_exporter_test.go index 0b8cea9b11e3..05472f98caf4 100644 --- a/exporter/sentryexporter/sentry_exporter_test.go +++ b/exporter/sentryexporter/sentry_exporter_test.go @@ -24,8 +24,9 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" ) /* @@ -194,7 +195,7 @@ func TestSpanEventToSentryEvent(t *testing.T) { "library_version": "1.4.3", "aws_instance": "ap-south-1", "unique_id": "abcd1234", - "span_kind": pdata.SpanKindClient.String(), + "span_kind": ptrace.SpanKindClient.String(), "status_message": "message", }, StartTime: unixNanoToTime(123), @@ -284,24 +285,24 @@ func TestSpanEventToSentryEvent(t *testing.T) { func TestSpanToSentrySpan(t *testing.T) { t.Run("with root span and invalid parent span_id", func(t *testing.T) { - testSpan := pdata.NewSpan() - testSpan.SetParentSpanID(pdata.InvalidSpanID()) + testSpan := ptrace.NewSpan() + testSpan.SetParentSpanID(pcommon.InvalidSpanID()) - sentrySpan := convertToSentrySpan(testSpan, pdata.NewInstrumentationScope(), map[string]string{}) + sentrySpan := convertToSentrySpan(testSpan, pcommon.NewInstrumentationScope(), map[string]string{}) assert.NotNil(t, sentrySpan) assert.True(t, spanIsTransaction(testSpan)) }) t.Run("with full span", func(t *testing.T) { - testSpan := pdata.NewSpan() + testSpan := ptrace.NewSpan() - traceID := pdata.NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1}) - spanID := pdata.NewSpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8}) - parentSpanID := pdata.NewSpanID([8]byte{8, 7, 6, 5, 4, 3, 2, 1}) + traceID := pcommon.NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1}) + spanID := pcommon.NewSpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8}) + parentSpanID := pcommon.NewSpanID([8]byte{8, 7, 6, 5, 4, 3, 2, 1}) name := "span_name" - var startTime pdata.Timestamp = 123 - var endTime pdata.Timestamp = 1234567890 - kind := pdata.SpanKindClient + var startTime pcommon.Timestamp = 123 + var endTime pcommon.Timestamp = 1234567890 + kind := ptrace.SpanKindClient statusMessage := "message" testSpan.Attributes().InsertString("key", "value") @@ -315,9 +316,9 @@ func TestSpanToSentrySpan(t *testing.T) { testSpan.SetKind(kind) testSpan.Status().SetMessage(statusMessage) - testSpan.Status().SetCode(pdata.StatusCodeOk) + testSpan.Status().SetCode(ptrace.StatusCodeOk) - library := pdata.NewInstrumentationScope() + library := pcommon.NewInstrumentationScope() library.SetName("otel-python") library.SetVersion("1.4.3") @@ -343,7 +344,7 @@ func TestSpanToSentrySpan(t *testing.T) { "library_version": "1.4.3", "aws_instance": "ca-central-1", "unique_id": "abcd1234", - "span_kind": pdata.SpanKindClient.String(), + "span_kind": ptrace.SpanKindClient.String(), "status_message": statusMessage, }, StartTime: unixNanoToTime(startTime), @@ -362,8 +363,8 @@ type SpanDescriptorsCase struct { testName string // input name string - attrs pdata.Map - spanKind pdata.SpanKind + attrs pcommon.Map + spanKind ptrace.SpanKind // output op string description string @@ -374,71 +375,71 @@ func TestGenerateSpanDescriptors(t *testing.T) { { testName: "http-client", name: "/api/users/{user_id}", - attrs: pdata.NewMapFromRaw(map[string]interface{}{ + attrs: 
pcommon.NewMapFromRaw(map[string]interface{}{ conventions.AttributeHTTPMethod: "GET", }), - spanKind: pdata.SpanKindClient, + spanKind: ptrace.SpanKindClient, op: "http.client", description: "GET /api/users/{user_id}", }, { testName: "http-server", name: "/api/users/{user_id}", - attrs: pdata.NewMapFromRaw(map[string]interface{}{ + attrs: pcommon.NewMapFromRaw(map[string]interface{}{ conventions.AttributeHTTPMethod: "POST", }), - spanKind: pdata.SpanKindServer, + spanKind: ptrace.SpanKindServer, op: "http.server", description: "POST /api/users/{user_id}", }, { testName: "db-call-without-statement", name: "SET mykey 'Val'", - attrs: pdata.NewMapFromRaw(map[string]interface{}{ + attrs: pcommon.NewMapFromRaw(map[string]interface{}{ conventions.AttributeDBSystem: "redis", }), - spanKind: pdata.SpanKindClient, + spanKind: ptrace.SpanKindClient, op: "db", description: "SET mykey 'Val'", }, { testName: "db-call-with-statement", name: "mysql call", - attrs: pdata.NewMapFromRaw(map[string]interface{}{ + attrs: pcommon.NewMapFromRaw(map[string]interface{}{ conventions.AttributeDBSystem: "sqlite", conventions.AttributeDBStatement: "SELECT * FROM table", }), - spanKind: pdata.SpanKindClient, + spanKind: ptrace.SpanKindClient, op: "db", description: "SELECT * FROM table", }, { testName: "rpc", name: "grpc.test.EchoService/Echo", - attrs: pdata.NewMapFromRaw(map[string]interface{}{ + attrs: pcommon.NewMapFromRaw(map[string]interface{}{ conventions.AttributeRPCService: "EchoService", }), - spanKind: pdata.SpanKindClient, + spanKind: ptrace.SpanKindClient, op: "rpc", description: "grpc.test.EchoService/Echo", }, { testName: "message-system", name: "message-destination", - attrs: pdata.NewMapFromRaw(map[string]interface{}{ + attrs: pcommon.NewMapFromRaw(map[string]interface{}{ "messaging.system": "kafka", }), - spanKind: pdata.SpanKindProducer, + spanKind: ptrace.SpanKindProducer, op: "message", description: "message-destination", }, { testName: "faas", name: "message-destination", - attrs: pdata.NewMapFromRaw(map[string]interface{}{ + attrs: pcommon.NewMapFromRaw(map[string]interface{}{ "faas.trigger": "pubsub", }), - spanKind: pdata.SpanKindServer, + spanKind: ptrace.SpanKindServer, op: "pubsub", description: "message-destination", }, @@ -454,7 +455,7 @@ func TestGenerateSpanDescriptors(t *testing.T) { } func TestGenerateTagsFromAttributes(t *testing.T) { - attrs := pdata.NewMap() + attrs := pcommon.NewMap() attrs.InsertString("string-key", "string-value") attrs.InsertBool("bool-key", true) @@ -476,7 +477,7 @@ func TestGenerateTagsFromAttributes(t *testing.T) { type SpanStatusCase struct { testName string // input - spanStatus pdata.SpanStatus + spanStatus ptrace.SpanStatus // output status sentry.SpanStatus message string @@ -486,16 +487,16 @@ func TestStatusFromSpanStatus(t *testing.T) { testCases := []SpanStatusCase{ { testName: "with empty status", - spanStatus: pdata.NewSpanStatus(), + spanStatus: ptrace.NewSpanStatus(), status: sentry.SpanStatusUndefined, message: "", }, { testName: "with status code", - spanStatus: func() pdata.SpanStatus { - spanStatus := pdata.NewSpanStatus() + spanStatus: func() ptrace.SpanStatus { + spanStatus := ptrace.NewSpanStatus() spanStatus.SetMessage("message") - spanStatus.SetCode(pdata.StatusCodeError) + spanStatus.SetCode(ptrace.StatusCodeError) return spanStatus }(), @@ -504,10 +505,10 @@ func TestStatusFromSpanStatus(t *testing.T) { }, { testName: "with unimplemented status code", - spanStatus: func() pdata.SpanStatus { - spanStatus := pdata.NewSpanStatus() + 
spanStatus: func() ptrace.SpanStatus { + spanStatus := ptrace.NewSpanStatus() spanStatus.SetMessage("message") - spanStatus.SetCode(pdata.StatusCode(1337)) + spanStatus.SetCode(ptrace.StatusCode(1337)) return spanStatus }(), @@ -624,7 +625,7 @@ func (t *mockTransport) Flush(ctx context.Context) bool { type PushTraceDataTestCase struct { testName string // input - td pdata.Traces + td ptrace.Traces // output called bool } @@ -633,14 +634,14 @@ func TestPushTraceData(t *testing.T) { testCases := []PushTraceDataTestCase{ { testName: "with no resources", - td: pdata.NewTraces(), + td: ptrace.NewTraces(), called: false, }, { testName: "with no libraries", - td: func() pdata.Traces { - traces := pdata.NewTraces() - resourceSpans := pdata.NewResourceSpans() + td: func() ptrace.Traces { + traces := ptrace.NewTraces() + resourceSpans := ptrace.NewResourceSpans() tgt := traces.ResourceSpans().AppendEmpty() resourceSpans.CopyTo(tgt) return traces @@ -649,8 +650,8 @@ func TestPushTraceData(t *testing.T) { }, { testName: "with no spans", - td: func() pdata.Traces { - traces := pdata.NewTraces() + td: func() ptrace.Traces { + traces := ptrace.NewTraces() resourceSpans := traces.ResourceSpans() resourceSpans.AppendEmpty().ScopeSpans().AppendEmpty() return traces @@ -659,8 +660,8 @@ func TestPushTraceData(t *testing.T) { }, { testName: "with full trace", - td: func() pdata.Traces { - traces := pdata.NewTraces() + td: func() ptrace.Traces { + traces := ptrace.NewTraces() resourceSpans := traces.ResourceSpans() resourceSpans.AppendEmpty().ScopeSpans().AppendEmpty().Spans().AppendEmpty() return traces diff --git a/exporter/sentryexporter/utils.go b/exporter/sentryexporter/utils.go index 100f882beeee..0cf866c604d0 100644 --- a/exporter/sentryexporter/utils.go +++ b/exporter/sentryexporter/utils.go @@ -17,11 +17,11 @@ package sentryexporter // import "github.com/open-telemetry/opentelemetry-collec import ( "time" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" ) // unixNanoToTime converts UNIX Epoch time in nanoseconds // to a Time struct. 
-func unixNanoToTime(u pdata.Timestamp) time.Time { +func unixNanoToTime(u pcommon.Timestamp) time.Time { return time.Unix(0, int64(u)).UTC() } diff --git a/exporter/signalfxexporter/dpclient.go b/exporter/signalfxexporter/dpclient.go index 3f64fda520d3..e2ecee6fe35b 100644 --- a/exporter/signalfxexporter/dpclient.go +++ b/exporter/signalfxexporter/dpclient.go @@ -28,8 +28,7 @@ import ( sfxpb "github.com/signalfx/com_signalfx_metrics_protobuf/model" "go.opentelemetry.io/collector/consumer/consumererror" - "go.opentelemetry.io/collector/model/otlp" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter/internal/translation" @@ -43,7 +42,7 @@ type sfxClientBase struct { zippers sync.Pool } -var metricsMarshaler = otlp.NewJSONMetricsMarshaler() +var metricsMarshaler = pmetric.NewJSONMarshaler() // avoid attempting to compress things that fit into a single ethernet frame func (s *sfxClientBase) getReader(b []byte) (io.Reader, bool, error) { @@ -75,7 +74,7 @@ type sfxDPClient struct { func (s *sfxDPClient) pushMetricsData( ctx context.Context, - md pdata.Metrics, + md pmetric.Metrics, ) (droppedDataPoints int, err error) { rms := md.ResourceMetrics() if rms.Len() == 0 { @@ -91,7 +90,7 @@ func (s *sfxDPClient) pushMetricsData( } } - // All metrics in the pdata.Metrics will have the same access token because of the BatchPerResourceMetrics. + // All metrics in the pmetric.Metrics will have the same access token because of the BatchPerResourceMetrics. metricToken := s.retrieveAccessToken(rms.At(0)) sfxDataPoints := s.converter.MetricsToSignalFxV2(md) @@ -180,7 +179,7 @@ func (s *sfxDPClient) encodeBody(dps []*sfxpb.DataPoint) (bodyReader io.Reader, return s.getReader(body) } -func (s *sfxDPClient) retrieveAccessToken(md pdata.ResourceMetrics) string { +func (s *sfxDPClient) retrieveAccessToken(md pmetric.ResourceMetrics) string { if !s.accessTokenPassthrough { // Nothing to do if token is pass through not configured or resource is nil. 
return "" diff --git a/exporter/signalfxexporter/eventclient.go b/exporter/signalfxexporter/eventclient.go index 7cc267469e2c..a89c15c559dc 100644 --- a/exporter/signalfxexporter/eventclient.go +++ b/exporter/signalfxexporter/eventclient.go @@ -24,7 +24,8 @@ import ( sfxpb "github.com/signalfx/com_signalfx_metrics_protobuf/model" "go.opentelemetry.io/collector/consumer/consumererror" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter/internal/translation" @@ -38,7 +39,7 @@ type sfxEventClient struct { accessTokenPassthrough bool } -func (s *sfxEventClient) pushLogsData(ctx context.Context, ld pdata.Logs) (int, error) { +func (s *sfxEventClient) pushLogsData(ctx context.Context, ld plog.Logs) (int, error) { rls := ld.ResourceLogs() if rls.Len() == 0 { return 0, nil @@ -115,9 +116,9 @@ func (s *sfxEventClient) encodeBody(events []*sfxpb.Event) (bodyReader io.Reader return s.getReader(body) } -func (s *sfxEventClient) retrieveAccessToken(rl pdata.ResourceLogs) string { +func (s *sfxEventClient) retrieveAccessToken(rl plog.ResourceLogs) string { attrs := rl.Resource().Attributes() - if accessToken, ok := attrs.Get(splunk.SFxAccessTokenLabel); ok && accessToken.Type() == pdata.ValueTypeString { + if accessToken, ok := attrs.Get(splunk.SFxAccessTokenLabel); ok && accessToken.Type() == pcommon.ValueTypeString { return accessToken.StringVal() } return "" diff --git a/exporter/signalfxexporter/exporter.go b/exporter/signalfxexporter/exporter.go index d9c3aa5e3be6..0bcf6b699640 100644 --- a/exporter/signalfxexporter/exporter.go +++ b/exporter/signalfxexporter/exporter.go @@ -26,7 +26,8 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter/internal/dimensions" @@ -57,9 +58,9 @@ func (sme *signalfMetadataExporter) ConsumeMetadata(metadata []*metadata.Metadat } type signalfxExporter struct { - pushMetricsData func(ctx context.Context, md pdata.Metrics) (droppedTimeSeries int, err error) + pushMetricsData func(ctx context.Context, md pmetric.Metrics) (droppedTimeSeries int, err error) pushMetadata func(metadata []*metadata.MetadataUpdate) error - pushLogsData func(ctx context.Context, ld pdata.Logs) (droppedLogRecords int, err error) + pushLogsData func(ctx context.Context, ld plog.Logs) (droppedLogRecords int, err error) hostMetadataSyncer *hostmetadata.Syncer } @@ -189,7 +190,7 @@ func newEventExporter(config *Config, logger *zap.Logger) (*signalfxExporter, er }, nil } -func (se *signalfxExporter) pushMetrics(ctx context.Context, md pdata.Metrics) error { +func (se *signalfxExporter) pushMetrics(ctx context.Context, md pmetric.Metrics) error { _, err := se.pushMetricsData(ctx, md) if err == nil && se.hostMetadataSyncer != nil { se.hostMetadataSyncer.Sync(md) @@ -197,7 +198,7 @@ func (se *signalfxExporter) pushMetrics(ctx context.Context, md pdata.Metrics) e return err } -func (se *signalfxExporter) pushLogs(ctx context.Context, ld pdata.Logs) error { +func (se *signalfxExporter) pushLogs(ctx context.Context, ld plog.Logs) error { _, err := se.pushLogsData(ctx, ld) return err } diff --git 
a/exporter/signalfxexporter/exporter_test.go b/exporter/signalfxexporter/exporter_test.go index f5b951f90b23..d86ea837ffcd 100644 --- a/exporter/signalfxexporter/exporter_test.go +++ b/exporter/signalfxexporter/exporter_test.go @@ -37,7 +37,9 @@ import ( "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/consumer/consumererror" "go.opentelemetry.io/collector/exporter/exporterhelper" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter/internal/dimensions" @@ -116,13 +118,13 @@ func TestNew(t *testing.T) { } func TestConsumeMetrics(t *testing.T) { - smallBatch := pdata.NewMetrics() + smallBatch := pmetric.NewMetrics() rm := smallBatch.ResourceMetrics().AppendEmpty() ilm := rm.ScopeMetrics().AppendEmpty() m := ilm.Metrics().AppendEmpty() m.SetName("test_gauge") - m.SetDataType(pdata.MetricDataTypeGauge) + m.SetDataType(pmetric.MetricDataTypeGauge) dp := m.Gauge().DataPoints().AppendEmpty() dp.Attributes().InsertString("k0", "v0") dp.Attributes().InsertString("k1", "v1") @@ -130,7 +132,7 @@ func TestConsumeMetrics(t *testing.T) { tests := []struct { name string - md pdata.Metrics + md pmetric.Metrics httpResponseCode int retryAfter int numDroppedTimeSeries int @@ -241,8 +243,8 @@ func TestConsumeMetricsWithAccessTokenPassthrough(t *testing.T) { fromHeaders := "AccessTokenFromClientHeaders" fromLabels := []string{"AccessTokenFromLabel0", "AccessTokenFromLabel1"} - validMetricsWithToken := func(includeToken bool, token string) pdata.Metrics { - out := pdata.NewMetrics() + validMetricsWithToken := func(includeToken bool, token string) pmetric.Metrics { + out := pmetric.NewMetrics() rm := out.ResourceMetrics().AppendEmpty() if includeToken { @@ -253,7 +255,7 @@ func TestConsumeMetricsWithAccessTokenPassthrough(t *testing.T) { m := ilm.Metrics().AppendEmpty() m.SetName("test_gauge") - m.SetDataType(pdata.MetricDataTypeGauge) + m.SetDataType(pmetric.MetricDataTypeGauge) dp := m.Gauge().DataPoints().AppendEmpty() dp.Attributes().InsertString("k0", "v0") @@ -265,7 +267,7 @@ func TestConsumeMetricsWithAccessTokenPassthrough(t *testing.T) { tests := []struct { name string accessTokenPassthrough bool - metrics pdata.Metrics + metrics pmetric.Metrics additionalHeaders map[string]string pushedTokens []string }{ @@ -284,7 +286,7 @@ func TestConsumeMetricsWithAccessTokenPassthrough(t *testing.T) { { name: "don't passthrough access token and included in md", accessTokenPassthrough: false, - metrics: func() pdata.Metrics { + metrics: func() pmetric.Metrics { forFirstToken := validMetricsWithToken(true, fromLabels[0]) tgt := forFirstToken.ResourceMetrics().AppendEmpty() validMetricsWithToken(true, fromLabels[1]).ResourceMetrics().At(0).CopyTo(tgt) @@ -310,14 +312,14 @@ func TestConsumeMetricsWithAccessTokenPassthrough(t *testing.T) { { name: "use token from header when resource is nil", accessTokenPassthrough: true, - metrics: func() pdata.Metrics { - out := pdata.NewMetrics() + metrics: func() pmetric.Metrics { + out := pmetric.NewMetrics() rm := out.ResourceMetrics().AppendEmpty() ilm := rm.ScopeMetrics().AppendEmpty() m := ilm.Metrics().AppendEmpty() m.SetName("test_gauge") - m.SetDataType(pdata.MetricDataTypeGauge) + m.SetDataType(pmetric.MetricDataTypeGauge) dp := m.Gauge().DataPoints().AppendEmpty() dp.Attributes().InsertString("k0", "v0") 
dp.Attributes().InsertString("k1", "v1") @@ -330,7 +332,7 @@ func TestConsumeMetricsWithAccessTokenPassthrough(t *testing.T) { { name: "multiple tokens passed through", accessTokenPassthrough: true, - metrics: func() pdata.Metrics { + metrics: func() pmetric.Metrics { forFirstToken := validMetricsWithToken(true, fromLabels[0]) forSecondToken := validMetricsWithToken(true, fromLabels[1]) forSecondToken.ResourceMetrics().EnsureCapacity(2) @@ -343,7 +345,7 @@ func TestConsumeMetricsWithAccessTokenPassthrough(t *testing.T) { { name: "multiple tokens passed through - multiple md with same token", accessTokenPassthrough: true, - metrics: func() pdata.Metrics { + metrics: func() pmetric.Metrics { forFirstToken := validMetricsWithToken(true, fromLabels[1]) forSecondToken := validMetricsWithToken(true, fromLabels[0]) moreForSecondToken := validMetricsWithToken(true, fromLabels[1]) @@ -359,7 +361,7 @@ func TestConsumeMetricsWithAccessTokenPassthrough(t *testing.T) { { name: "multiple tokens passed through - multiple md with same token grouped together", accessTokenPassthrough: true, - metrics: func() pdata.Metrics { + metrics: func() pmetric.Metrics { forFirstToken := validMetricsWithToken(true, fromLabels[0]) forSecondToken := validMetricsWithToken(true, fromLabels[1]) moreForSecondToken := validMetricsWithToken(true, fromLabels[1]) @@ -375,7 +377,7 @@ func TestConsumeMetricsWithAccessTokenPassthrough(t *testing.T) { { name: "multiple tokens passed through - one corrupted", accessTokenPassthrough: true, - metrics: func() pdata.Metrics { + metrics: func() pmetric.Metrics { forFirstToken := validMetricsWithToken(true, fromLabels[0]) forSecondToken := validMetricsWithToken(false, fromLabels[1]) forSecondToken.ResourceMetrics().EnsureCapacity(2) @@ -469,18 +471,18 @@ func TestNewEventExporter(t *testing.T) { assert.Error(t, err) } -func makeSampleResourceLogs() pdata.Logs { - out := pdata.NewLogs() +func makeSampleResourceLogs() plog.Logs { + out := plog.NewLogs() l := out.ResourceLogs().AppendEmpty().ScopeLogs().AppendEmpty().LogRecords().AppendEmpty() - l.SetTimestamp(pdata.Timestamp(1000)) + l.SetTimestamp(pcommon.Timestamp(1000)) attrs := l.Attributes() attrs.InsertString("k0", "v0") attrs.InsertString("k1", "v1") attrs.InsertString("k2", "v2") - propMapVal := pdata.NewValueMap() + propMapVal := pcommon.NewValueMap() propMap := propMapVal.MapVal() propMap.InsertString("env", "prod") propMap.InsertBool("isActive", true) @@ -488,8 +490,8 @@ func makeSampleResourceLogs() pdata.Logs { propMap.InsertDouble("temp", 40.5) propMap.Sort() attrs.Insert("com.splunk.signalfx.event_properties", propMapVal) - attrs.Insert("com.splunk.signalfx.event_category", pdata.NewValueInt(int64(sfxpb.EventCategory_USER_DEFINED))) - attrs.Insert("com.splunk.signalfx.event_type", pdata.NewValueString("shutdown")) + attrs.Insert("com.splunk.signalfx.event_category", pcommon.NewValueInt(int64(sfxpb.EventCategory_USER_DEFINED))) + attrs.Insert("com.splunk.signalfx.event_type", pcommon.NewValueString("shutdown")) l.Attributes().Sort() @@ -499,7 +501,7 @@ func makeSampleResourceLogs() pdata.Logs { func TestConsumeEventData(t *testing.T) { tests := []struct { name string - resourceLogs pdata.Logs + resourceLogs plog.Logs reqTestFunc func(t *testing.T, r *http.Request) httpResponseCode int numDroppedLogRecords int @@ -513,7 +515,7 @@ func TestConsumeEventData(t *testing.T) { }, { name: "no_event_attribute", - resourceLogs: func() pdata.Logs { + resourceLogs: func() plog.Logs { out := makeSampleResourceLogs() attrs := 
out.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Attributes() attrs.Delete("com.splunk.signalfx.event_category") @@ -526,11 +528,11 @@ func TestConsumeEventData(t *testing.T) { }, { name: "nonconvertible_log_attrs", - resourceLogs: func() pdata.Logs { + resourceLogs: func() plog.Logs { out := makeSampleResourceLogs() attrs := out.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Attributes() - mapAttr := pdata.NewValueMap() + mapAttr := pcommon.NewValueMap() attrs.Insert("map", mapAttr) propsAttrs, _ := attrs.Get("com.splunk.signalfx.event_properties") @@ -601,7 +603,7 @@ func TestConsumeLogsDataWithAccessTokenPassthrough(t *testing.T) { fromHeaders := "AccessTokenFromClientHeaders" fromLabels := "AccessTokenFromLabel" - newLogData := func(includeToken bool) pdata.Logs { + newLogData := func(includeToken bool) plog.Logs { out := makeSampleResourceLogs() makeSampleResourceLogs().ResourceLogs().At(0).CopyTo(out.ResourceLogs().AppendEmpty()) @@ -683,8 +685,8 @@ func TestConsumeLogsDataWithAccessTokenPassthrough(t *testing.T) { } } -func generateLargeDPBatch() pdata.Metrics { - md := pdata.NewMetrics() +func generateLargeDPBatch() pmetric.Metrics { + md := pmetric.NewMetrics() md.ResourceMetrics().EnsureCapacity(6500) ts := time.Now() @@ -694,10 +696,10 @@ func generateLargeDPBatch() pdata.Metrics { m := ilm.Metrics().AppendEmpty() m.SetName("test_" + strconv.Itoa(i)) - m.SetDataType(pdata.MetricDataTypeGauge) + m.SetDataType(pmetric.MetricDataTypeGauge) dp := m.Gauge().DataPoints().AppendEmpty() - dp.SetTimestamp(pdata.NewTimestampFromTime(ts)) + dp.SetTimestamp(pcommon.NewTimestampFromTime(ts)) dp.Attributes().InsertString("k0", "v0") dp.Attributes().InsertString("k1", "v1") dp.SetIntVal(int64(i)) @@ -706,8 +708,8 @@ func generateLargeDPBatch() pdata.Metrics { return md } -func generateLargeEventBatch() pdata.Logs { - out := pdata.NewLogs() +func generateLargeEventBatch() plog.Logs { + out := plog.NewLogs() logs := out.ResourceLogs().AppendEmpty().ScopeLogs().AppendEmpty().LogRecords() batchSize := 65000 @@ -717,7 +719,7 @@ func generateLargeEventBatch() pdata.Logs { lr := logs.AppendEmpty() lr.Attributes().InsertString("k0", "k1") lr.Attributes().InsertNull("com.splunk.signalfx.event_category") - lr.SetTimestamp(pdata.NewTimestampFromTime(ts)) + lr.SetTimestamp(pcommon.NewTimestampFromTime(ts)) } return out @@ -990,7 +992,7 @@ func TestConsumeMetadata(t *testing.T) { func BenchmarkExporterConsumeData(b *testing.B) { batchSize := 1000 - metrics := pdata.NewMetrics() + metrics := pmetric.NewMetrics() tmd := testMetricsData() for i := 0; i < batchSize; i++ { tmd.ResourceMetrics().At(0).CopyTo(metrics.ResourceMetrics().AppendEmpty()) diff --git a/exporter/signalfxexporter/factory_test.go b/exporter/signalfxexporter/factory_test.go index 29295d39458a..e7dfe92d7170 100644 --- a/exporter/signalfxexporter/factory_test.go +++ b/exporter/signalfxexporter/factory_test.go @@ -30,7 +30,8 @@ import ( "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/config/configtest" "go.opentelemetry.io/collector/exporter/exporterhelper" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter/internal/translation" @@ -312,22 +313,22 @@ func TestCreateMetricsExporterWithEmptyExcludeMetrics(t *testing.T) { assert.Equal(t, 0, len(config.ExcludeMetrics)) } -func testMetricsData() 
pdata.Metrics { - md := pdata.NewMetrics() +func testMetricsData() pmetric.Metrics { + md := pmetric.NewMetrics() ms := md.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() m1 := ms.AppendEmpty() m1.SetName("system.memory.usage") m1.SetDescription("Bytes of memory in use") m1.SetUnit("bytes") - m1.SetDataType(pdata.MetricDataTypeGauge) + m1.SetDataType(pmetric.MetricDataTypeGauge) dp11 := m1.Gauge().DataPoints().AppendEmpty() dp11.Attributes().InsertString("state", "used") dp11.Attributes().InsertString("host", "host0") dp11.Attributes().InsertString("kubernetes_node", "node0") dp11.Attributes().InsertString("kubernetes_cluster", "cluster0") dp11.Attributes().Sort() - dp11.SetTimestamp(pdata.NewTimestampFromTime(time.Unix(1596000000, 0))) + dp11.SetTimestamp(pcommon.NewTimestampFromTime(time.Unix(1596000000, 0))) dp11.SetIntVal(4e9) dp12 := m1.Gauge().DataPoints().AppendEmpty() dp12.Attributes().InsertString("state", "free") @@ -335,121 +336,121 @@ func testMetricsData() pdata.Metrics { dp12.Attributes().InsertString("kubernetes_node", "node0") dp12.Attributes().InsertString("kubernetes_cluster", "cluster0") dp12.Attributes().Sort() - dp12.SetTimestamp(pdata.NewTimestampFromTime(time.Unix(1596000000, 0))) + dp12.SetTimestamp(pcommon.NewTimestampFromTime(time.Unix(1596000000, 0))) dp12.SetIntVal(6e9) m2 := ms.AppendEmpty() m2.SetName("system.disk.io") m2.SetDescription("Disk I/O.") - m2.SetDataType(pdata.MetricDataTypeSum) + m2.SetDataType(pmetric.MetricDataTypeSum) m2.Sum().SetIsMonotonic(true) - m2.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m2.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) dp21 := m2.Sum().DataPoints().AppendEmpty() dp21.Attributes().InsertString("host", "host0") dp21.Attributes().InsertString("direction", "read") dp21.Attributes().InsertString("device", "sda1") dp21.Attributes().Sort() - dp21.SetTimestamp(pdata.NewTimestampFromTime(time.Unix(1596000000, 0))) + dp21.SetTimestamp(pcommon.NewTimestampFromTime(time.Unix(1596000000, 0))) dp21.SetIntVal(1e9) dp22 := m2.Sum().DataPoints().AppendEmpty() dp22.Attributes().InsertString("host", "host0") dp22.Attributes().InsertString("direction", "read") dp22.Attributes().InsertString("device", "sda2") dp22.Attributes().Sort() - dp22.SetTimestamp(pdata.NewTimestampFromTime(time.Unix(1596000000, 0))) + dp22.SetTimestamp(pcommon.NewTimestampFromTime(time.Unix(1596000000, 0))) dp22.SetIntVal(2e9) dp23 := m2.Sum().DataPoints().AppendEmpty() dp23.Attributes().InsertString("host", "host0") dp23.Attributes().InsertString("direction", "write") dp23.Attributes().InsertString("device", "sda1") dp23.Attributes().Sort() - dp23.SetTimestamp(pdata.NewTimestampFromTime(time.Unix(1596000000, 0))) + dp23.SetTimestamp(pcommon.NewTimestampFromTime(time.Unix(1596000000, 0))) dp23.SetIntVal(3e9) dp24 := m2.Sum().DataPoints().AppendEmpty() dp24.Attributes().InsertString("host", "host0") dp24.Attributes().InsertString("direction", "write") dp24.Attributes().InsertString("device", "sda2") dp24.Attributes().Sort() - dp24.SetTimestamp(pdata.NewTimestampFromTime(time.Unix(1596000000, 0))) + dp24.SetTimestamp(pcommon.NewTimestampFromTime(time.Unix(1596000000, 0))) dp24.SetIntVal(8e9) m3 := ms.AppendEmpty() m3.SetName("system.disk.operations") m3.SetDescription("Disk operations count.") m3.SetUnit("bytes") - m3.SetDataType(pdata.MetricDataTypeSum) + m3.SetDataType(pmetric.MetricDataTypeSum) m3.Sum().SetIsMonotonic(true) - 
m3.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m3.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) dp31 := m3.Sum().DataPoints().AppendEmpty() dp31.Attributes().InsertString("host", "host0") dp31.Attributes().InsertString("direction", "write") dp31.Attributes().InsertString("device", "sda1") dp31.Attributes().Sort() - dp31.SetTimestamp(pdata.NewTimestampFromTime(time.Unix(1596000000, 0))) + dp31.SetTimestamp(pcommon.NewTimestampFromTime(time.Unix(1596000000, 0))) dp31.SetIntVal(4e3) dp32 := m3.Sum().DataPoints().AppendEmpty() dp32.Attributes().InsertString("host", "host0") dp32.Attributes().InsertString("direction", "read") dp32.Attributes().InsertString("device", "sda2") dp32.Attributes().Sort() - dp32.SetTimestamp(pdata.NewTimestampFromTime(time.Unix(1596000000, 0))) + dp32.SetTimestamp(pcommon.NewTimestampFromTime(time.Unix(1596000000, 0))) dp32.SetIntVal(6e3) dp33 := m3.Sum().DataPoints().AppendEmpty() dp33.Attributes().InsertString("host", "host0") dp33.Attributes().InsertString("direction", "write") dp33.Attributes().InsertString("device", "sda1") dp33.Attributes().Sort() - dp33.SetTimestamp(pdata.NewTimestampFromTime(time.Unix(1596000000, 0))) + dp33.SetTimestamp(pcommon.NewTimestampFromTime(time.Unix(1596000000, 0))) dp33.SetIntVal(1e3) dp34 := m3.Sum().DataPoints().AppendEmpty() dp34.Attributes().InsertString("host", "host0") dp34.Attributes().InsertString("direction", "write") dp34.Attributes().InsertString("device", "sda2") dp34.Attributes().Sort() - dp34.SetTimestamp(pdata.NewTimestampFromTime(time.Unix(1596000000, 0))) + dp34.SetTimestamp(pcommon.NewTimestampFromTime(time.Unix(1596000000, 0))) dp34.SetIntVal(5e3) m4 := ms.AppendEmpty() m4.SetName("system.disk.operations") m4.SetDescription("Disk operations count.") m4.SetUnit("bytes") - m4.SetDataType(pdata.MetricDataTypeSum) + m4.SetDataType(pmetric.MetricDataTypeSum) m4.Sum().SetIsMonotonic(true) - m4.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m4.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) dp41 := m4.Sum().DataPoints().AppendEmpty() dp41.Attributes().InsertString("host", "host0") dp41.Attributes().InsertString("direction", "read") dp41.Attributes().InsertString("device", "sda1") dp41.Attributes().Sort() - dp41.SetTimestamp(pdata.NewTimestampFromTime(time.Unix(1596000060, 0))) + dp41.SetTimestamp(pcommon.NewTimestampFromTime(time.Unix(1596000060, 0))) dp41.SetIntVal(6e3) dp42 := m4.Sum().DataPoints().AppendEmpty() dp42.Attributes().InsertString("host", "host0") dp42.Attributes().InsertString("direction", "read") dp42.Attributes().InsertString("device", "sda2") dp42.Attributes().Sort() - dp42.SetTimestamp(pdata.NewTimestampFromTime(time.Unix(1596000060, 0))) + dp42.SetTimestamp(pcommon.NewTimestampFromTime(time.Unix(1596000060, 0))) dp42.SetIntVal(8e3) dp43 := m4.Sum().DataPoints().AppendEmpty() dp43.Attributes().InsertString("host", "host0") dp43.Attributes().InsertString("direction", "write") dp43.Attributes().InsertString("device", "sda1") dp43.Attributes().Sort() - dp43.SetTimestamp(pdata.NewTimestampFromTime(time.Unix(1596000060, 0))) + dp43.SetTimestamp(pcommon.NewTimestampFromTime(time.Unix(1596000060, 0))) dp43.SetIntVal(3e3) dp44 := m4.Sum().DataPoints().AppendEmpty() dp44.Attributes().InsertString("host", "host0") dp44.Attributes().InsertString("direction", "write") dp44.Attributes().InsertString("device", "sda2") dp44.Attributes().Sort() - 
dp44.SetTimestamp(pdata.NewTimestampFromTime(time.Unix(1596000060, 0))) + dp44.SetTimestamp(pcommon.NewTimestampFromTime(time.Unix(1596000060, 0))) dp44.SetIntVal(7e3) m5 := ms.AppendEmpty() m5.SetName("system.network.io") m5.SetDescription("The number of bytes transmitted and received") m5.SetUnit("bytes") - m5.SetDataType(pdata.MetricDataTypeGauge) + m5.SetDataType(pmetric.MetricDataTypeGauge) dp51 := m5.Gauge().DataPoints().AppendEmpty() dp51.Attributes().InsertString("host", "host0") dp51.Attributes().InsertString("direction", "receive") @@ -457,7 +458,7 @@ func testMetricsData() pdata.Metrics { dp51.Attributes().InsertString("kubernetes_node", "node0") dp51.Attributes().InsertString("kubernetes_cluster", "cluster0") dp51.Attributes().Sort() - dp51.SetTimestamp(pdata.NewTimestampFromTime(time.Unix(1596000000, 0))) + dp51.SetTimestamp(pcommon.NewTimestampFromTime(time.Unix(1596000000, 0))) dp51.SetIntVal(4e9) dp52 := m5.Gauge().DataPoints().AppendEmpty() dp52.Attributes().InsertString("host", "host0") @@ -466,13 +467,13 @@ func testMetricsData() pdata.Metrics { dp52.Attributes().InsertString("kubernetes_node", "node0") dp52.Attributes().InsertString("kubernetes_cluster", "cluster0") dp52.Attributes().Sort() - dp52.SetTimestamp(pdata.NewTimestampFromTime(time.Unix(1596000000, 0))) + dp52.SetTimestamp(pcommon.NewTimestampFromTime(time.Unix(1596000000, 0))) dp52.SetIntVal(6e9) m6 := ms.AppendEmpty() m6.SetName("system.network.packets") m6.SetDescription("The number of packets transferred") - m6.SetDataType(pdata.MetricDataTypeGauge) + m6.SetDataType(pmetric.MetricDataTypeGauge) dp61 := m6.Gauge().DataPoints().AppendEmpty() dp61.Attributes().InsertString("host", "host0") dp61.Attributes().InsertString("direction", "receive") @@ -480,7 +481,7 @@ func testMetricsData() pdata.Metrics { dp61.Attributes().InsertString("kubernetes_node", "node0") dp61.Attributes().InsertString("kubernetes_cluster", "cluster0") dp61.Attributes().Sort() - dp61.SetTimestamp(pdata.NewTimestampFromTime(time.Unix(1596000000, 0))) + dp61.SetTimestamp(pcommon.NewTimestampFromTime(time.Unix(1596000000, 0))) dp61.SetIntVal(200) dp62 := m6.Gauge().DataPoints().AppendEmpty() dp62.Attributes().InsertString("host", "host0") @@ -489,41 +490,41 @@ func testMetricsData() pdata.Metrics { dp62.Attributes().InsertString("kubernetes_node", "node0") dp62.Attributes().InsertString("kubernetes_cluster", "cluster0") dp62.Attributes().Sort() - dp62.SetTimestamp(pdata.NewTimestampFromTime(time.Unix(1596000000, 0))) + dp62.SetTimestamp(pcommon.NewTimestampFromTime(time.Unix(1596000000, 0))) dp62.SetIntVal(150) m7 := ms.AppendEmpty() m7.SetName("container.memory.working_set") m7.SetUnit("bytes") - m7.SetDataType(pdata.MetricDataTypeGauge) + m7.SetDataType(pmetric.MetricDataTypeGauge) dp71 := m7.Gauge().DataPoints().AppendEmpty() dp71.Attributes().InsertString("host", "host0") dp71.Attributes().InsertString("kubernetes_node", "node0") dp71.Attributes().InsertString("kubernetes_cluster", "cluster0") dp71.Attributes().Sort() - dp71.SetTimestamp(pdata.NewTimestampFromTime(time.Unix(1596000000, 0))) + dp71.SetTimestamp(pcommon.NewTimestampFromTime(time.Unix(1596000000, 0))) dp71.SetIntVal(1000) m8 := ms.AppendEmpty() m8.SetName("container.memory.page_faults") - m8.SetDataType(pdata.MetricDataTypeGauge) + m8.SetDataType(pmetric.MetricDataTypeGauge) dp81 := m8.Gauge().DataPoints().AppendEmpty() dp81.Attributes().InsertString("host", "host0") dp81.Attributes().InsertString("kubernetes_node", "node0") 
dp81.Attributes().InsertString("kubernetes_cluster", "cluster0") dp81.Attributes().Sort() - dp81.SetTimestamp(pdata.NewTimestampFromTime(time.Unix(1596000000, 0))) + dp81.SetTimestamp(pcommon.NewTimestampFromTime(time.Unix(1596000000, 0))) dp81.SetIntVal(1000) m9 := ms.AppendEmpty() m9.SetName("container.memory.major_page_faults") - m9.SetDataType(pdata.MetricDataTypeGauge) + m9.SetDataType(pmetric.MetricDataTypeGauge) dp91 := m9.Gauge().DataPoints().AppendEmpty() dp91.Attributes().InsertString("host", "host0") dp91.Attributes().InsertString("kubernetes_node", "node0") dp91.Attributes().InsertString("kubernetes_cluster", "cluster0") dp91.Attributes().Sort() - dp91.SetTimestamp(pdata.NewTimestampFromTime(time.Unix(1596000000, 0))) + dp91.SetTimestamp(pcommon.NewTimestampFromTime(time.Unix(1596000000, 0))) dp91.SetIntVal(1000) return md @@ -661,15 +662,15 @@ func TestDefaultExcludes_not_translated(t *testing.T) { require.Equal(t, 0, len(dps)) } -func getMetrics(metrics []map[string]string) pdata.Metrics { - md := pdata.NewMetrics() +func getMetrics(metrics []map[string]string) pmetric.Metrics { + md := pmetric.NewMetrics() ilms := md.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty() ilms.Metrics().EnsureCapacity(len(metrics)) for _, mp := range metrics { m := ilms.Metrics().AppendEmpty() // Set data type to some arbitrary since it does not matter for this test. - m.SetDataType(pdata.MetricDataTypeSum) + m.SetDataType(pmetric.MetricDataTypeSum) dp := m.Sum().DataPoints().AppendEmpty() dp.SetIntVal(0) attributesMap := dp.Attributes() diff --git a/exporter/signalfxexporter/go.mod b/exporter/signalfxexporter/go.mod index 219849c5488b..6ed911926415 100644 --- a/exporter/signalfxexporter/go.mod +++ b/exporter/signalfxexporter/go.mod @@ -14,8 +14,8 @@ require ( github.com/signalfx/com_signalfx_metrics_protobuf v0.0.3 github.com/signalfx/signalfx-agent/pkg/apm v0.0.0-20201202163743-65b4fa925fc8 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d go.uber.org/multierr v1.8.0 go.uber.org/zap v1.21.0 ) @@ -26,10 +26,10 @@ require ( ) require ( - github.com/cenkalti/backoff/v4 v4.1.2 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/felixge/httpsnoop v1.0.2 // indirect - github.com/go-logfmt/logfmt v0.5.0 // indirect + github.com/go-logfmt/logfmt v0.5.1 // indirect github.com/go-logr/logr v1.2.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.2.6 // indirect @@ -38,7 +38,7 @@ require ( github.com/golang/snappy v0.0.4 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/klauspost/compress v1.15.1 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect @@ -52,7 +52,6 @@ require ( github.com/signalfx/golib/v3 v3.3.13 // indirect github.com/signalfx/sapm-proto v0.4.0 // indirect github.com/smartystreets/goconvey v1.6.4 // indirect - github.com/spf13/cast v1.4.1 
// indirect github.com/tklauser/go-sysconf v0.3.10 // indirect github.com/tklauser/numcpus v0.4.0 // indirect github.com/uber/jaeger-client-go v2.30.0+incompatible // indirect @@ -64,9 +63,8 @@ require ( go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect - golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect golang.org/x/text v0.3.7 // indirect - google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect google.golang.org/grpc v1.45.0 // indirect google.golang.org/protobuf v1.28.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect @@ -74,6 +72,7 @@ require ( require ( github.com/jaegertracing/jaeger v1.32.0 // indirect + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 ) @@ -88,3 +87,5 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperre replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experimentalmetricmetadata => ../../pkg/experimentalmetricmetadata replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/signalfx => ../../pkg/translator/signalfx + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/exporter/signalfxexporter/go.sum b/exporter/signalfxexporter/go.sum index dfe5146c37a6..e7d34bf58c4b 100644 --- a/exporter/signalfxexporter/go.sum +++ b/exporter/signalfxexporter/go.sum @@ -2,7 +2,6 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/HdrHistogram/hdrhistogram-go v1.0.1 h1:GX8GAYDuhlFQnI2fRDHQhTlkHMz8bEn0jTI6LJU0mpw= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= @@ -20,16 +19,14 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= -github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= 
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= @@ -42,7 +39,6 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A= @@ -60,8 +56,8 @@ github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2 github.com/go-kit/kit v0.11.0 h1:IGmIEl7aHTYh6E2HlT+ptILBotjo4xl8PMDl852etiI= github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= +github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -121,7 +117,6 @@ github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm4 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod 
h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -165,8 +160,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= @@ -245,9 +240,6 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1 github.com/smartystreets/goconvey v1.6.4-0.20190306220146-200a235640ff/go.mod h1:KSQcGKpxUMHk3nbYzs/tIBAM2iDooCn0BmttHOJEbLs= github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -278,10 +270,12 @@ go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d 
h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0/go.mod h1:PFmBsWbldL1kiWZk9+0LBZz2brhByaGsvp6pRICMlPE= @@ -292,7 +286,7 @@ go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1 go.opentelemetry.io/otel/metric v0.28.0/go.mod h1:TrzsfQAmQaB1PDcdhBauLMk7nyyg9hm+GoQq/ekE9Iw= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.0/go.mod h1:qs7BrU5cZ8dXQHBGxHMOxwME/27YH2qEp4/+tZLLwJE= go.opentelemetry.io/otel/trace v1.6.1/go.mod h1:RkFRM1m0puWIq10oxImnGEduNBzxiN7TXluRBtE+5j0= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= @@ -334,8 +328,8 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -370,7 +364,6 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -401,7 +394,6 @@ google.golang.org/genproto 
v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -412,7 +404,6 @@ google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8 google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -426,7 +417,6 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= diff --git a/exporter/signalfxexporter/internal/correlation/correlation.go b/exporter/signalfxexporter/internal/correlation/correlation.go index 210fe10306d4..ff6ff43dcadd 100644 --- a/exporter/signalfxexporter/internal/correlation/correlation.go +++ b/exporter/signalfxexporter/internal/correlation/correlation.go @@ -23,7 +23,7 @@ import ( "github.com/signalfx/signalfx-agent/pkg/apm/correlations" "github.com/signalfx/signalfx-agent/pkg/apm/tracetracker" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/timeutils" @@ -91,7 +91,7 @@ func newCorrelationClient(cfg *Config, accessToken string, params component.Expo // AddSpans processes the provided spans to correlate the services and environment observed // to the resources (host, pods, etc.) emitting the spans. 
-func (cor *Tracker) AddSpans(ctx context.Context, traces pdata.Traces) error { +func (cor *Tracker) AddSpans(ctx context.Context, traces ptrace.Traces) error { if cor == nil || traces.ResourceSpans().Len() == 0 { return nil } diff --git a/exporter/signalfxexporter/internal/correlation/correlation_test.go b/exporter/signalfxexporter/internal/correlation/correlation_test.go index 5eae0bb6f15a..bbe8491228d7 100644 --- a/exporter/signalfxexporter/internal/correlation/correlation_test.go +++ b/exporter/signalfxexporter/internal/correlation/correlation_test.go @@ -23,7 +23,7 @@ import ( "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/config/confighttp" "go.opentelemetry.io/collector/config/configtls" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/ptrace" ) func TestTrackerAddSpans(t *testing.T) { @@ -37,13 +37,13 @@ func TestTrackerAddSpans(t *testing.T) { require.NoError(t, err) assert.NotNil(t, tracker.correlation, "correlation context should be set") - traces := pdata.NewTraces() + traces := ptrace.NewTraces() rs := traces.ResourceSpans().AppendEmpty() attr := rs.Resource().Attributes() attr.InsertString("host.name", "localhost") // Add empty first, should ignore. - tracker.AddSpans(context.Background(), pdata.NewTraces()) + tracker.AddSpans(context.Background(), ptrace.NewTraces()) assert.Nil(t, tracker.traceTracker) tracker.AddSpans(context.Background(), traces) diff --git a/exporter/signalfxexporter/internal/correlation/spanshims.go b/exporter/signalfxexporter/internal/correlation/spanshims.go index 026a9fd3fe92..81a2062da748 100644 --- a/exporter/signalfxexporter/internal/correlation/spanshims.go +++ b/exporter/signalfxexporter/internal/correlation/spanshims.go @@ -16,8 +16,8 @@ package correlation // import "github.com/open-telemetry/opentelemetry-collector import ( "github.com/signalfx/signalfx-agent/pkg/apm/tracetracker" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/ptrace" ) var ( @@ -26,7 +26,7 @@ var ( ) type spanWrap struct { - pdata.ResourceSpans + ptrace.ResourceSpans } func (s spanWrap) Environment() (string, bool) { @@ -72,7 +72,7 @@ func (s spanWrap) NumTags() int { } type spanListWrap struct { - pdata.ResourceSpansSlice + ptrace.ResourceSpansSlice } func (s spanListWrap) Len() int { diff --git a/exporter/signalfxexporter/internal/correlation/spanshims_test.go b/exporter/signalfxexporter/internal/correlation/spanshims_test.go index b7c7c9b8a6c2..90c41470c6e0 100644 --- a/exporter/signalfxexporter/internal/correlation/spanshims_test.go +++ b/exporter/signalfxexporter/internal/correlation/spanshims_test.go @@ -19,11 +19,11 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/ptrace" ) func TestSpanShimList(t *testing.T) { - spans := pdata.NewResourceSpansSlice() + spans := ptrace.NewResourceSpansSlice() spans.EnsureCapacity(2) s1 := spans.AppendEmpty() s2 := spans.AppendEmpty() @@ -34,13 +34,13 @@ func TestSpanShimList(t *testing.T) { } func TestSpanShimList_Empty(t *testing.T) { - spans := pdata.NewResourceSpansSlice() + spans := ptrace.NewResourceSpansSlice() wrapped := spanListWrap{spans} assert.Equal(t, 0, wrapped.Len()) } func TestSpanShim_Service(t *testing.T) { - span := pdata.NewResourceSpans() + span := ptrace.NewResourceSpans() res := 
span.Resource() attr := res.Attributes() attr.InsertString("service.name", "shopping-cart") @@ -54,7 +54,7 @@ func TestSpanShim_Service(t *testing.T) { } func TestSpanShim_Environment(t *testing.T) { - span := pdata.NewResourceSpans() + span := ptrace.NewResourceSpans() res := span.Resource() attr := res.Attributes() attr.InsertString("deployment.environment", "prod") @@ -68,7 +68,7 @@ func TestSpanShim_Environment(t *testing.T) { } func TestSpanShim_SignalfxEnvironment(t *testing.T) { - span := pdata.NewResourceSpans() + span := ptrace.NewResourceSpans() res := span.Resource() attr := res.Attributes() attr.InsertString("environment", "prod") @@ -82,7 +82,7 @@ func TestSpanShim_SignalfxEnvironment(t *testing.T) { } func TestSpanShim_Missing(t *testing.T) { - span := pdata.NewResourceSpans() + span := ptrace.NewResourceSpans() wrapped := spanWrap{span} _, ok := wrapped.Environment() @@ -92,7 +92,7 @@ func TestSpanShim_Missing(t *testing.T) { } func TestSpanShim_ResourceNil(t *testing.T) { - span := pdata.NewResourceSpans() + span := ptrace.NewResourceSpans() wrapped := spanWrap{span} @@ -107,7 +107,7 @@ func TestSpanShim_ResourceNil(t *testing.T) { } func TestSpanShim_Tags(t *testing.T) { - span := pdata.NewResourceSpans() + span := ptrace.NewResourceSpans() res := span.Resource() attr := res.Attributes() attr.InsertString("tag1", "tag1val") diff --git a/exporter/signalfxexporter/internal/hostmetadata/metadata.go b/exporter/signalfxexporter/internal/hostmetadata/metadata.go index 0647c09ab23a..44062059456c 100644 --- a/exporter/signalfxexporter/internal/hostmetadata/metadata.go +++ b/exporter/signalfxexporter/internal/hostmetadata/metadata.go @@ -17,7 +17,8 @@ package hostmetadata // import "github.com/open-telemetry/opentelemetry-collecto import ( "sync" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter/internal/dimensions" @@ -40,7 +41,7 @@ func NewSyncer(logger *zap.Logger, dimClient dimensions.MetadataUpdateClient) *S } } -func (s *Syncer) Sync(md pdata.Metrics) { +func (s *Syncer) Sync(md pmetric.Metrics) { // skip if already synced or if metrics data is empty if md.ResourceMetrics().Len() == 0 { return @@ -50,7 +51,7 @@ func (s *Syncer) Sync(md pdata.Metrics) { }) } -func (s *Syncer) syncOnResource(res pdata.Resource) { +func (s *Syncer) syncOnResource(res pcommon.Resource) { // If resourcedetection processor is enabled, all the metrics should have resource attributes // that can be used to update host metadata. 
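For orientation between the per-file hunks above and below: the substance of this change is mechanical, replacing the monolithic go.opentelemetry.io/collector/model/pdata package with the split pdata/pcommon, pdata/pmetric, pdata/plog and pdata/ptrace packages. A minimal sketch of what calling code looks like after the migration, assuming the v0.48.1-0.20220412005140 collector pseudo-version pinned in the go.mod changes above (the buildHostMetrics helper below is hypothetical and not part of this patch):

package main

import (
	"fmt"
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"
)

// buildHostMetrics (hypothetical helper) builds a one-datapoint pmetric.Metrics
// payload using the post-split API: container types come from pdata/pmetric,
// while shared types such as Timestamp and the attribute Map come from
// pdata/pcommon (previously both lived in model/pdata).
func buildHostMetrics() pmetric.Metrics {
	md := pmetric.NewMetrics() // was pdata.NewMetrics()
	rm := md.ResourceMetrics().AppendEmpty()
	rm.Resource().Attributes().InsertString("host.name", "host0")

	m := rm.ScopeMetrics().AppendEmpty().Metrics().AppendEmpty()
	m.SetName("system.disk.io")
	m.SetDataType(pmetric.MetricDataTypeSum) // was pdata.MetricDataTypeSum
	m.Sum().SetIsMonotonic(true)
	m.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)

	dp := m.Sum().DataPoints().AppendEmpty()
	dp.SetTimestamp(pcommon.NewTimestampFromTime(time.Now())) // was pdata.NewTimestampFromTime(...)
	dp.SetIntVal(42)
	return md
}

func main() {
	fmt.Println(buildHostMetrics().ResourceMetrics().Len()) // 1
}

The shape of the builder calls is unchanged; only the package qualifiers move, which is why the hunks in this patch are almost entirely one-line substitutions of pdata. with pmetric., pcommon., plog. or ptrace.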
// Based of this assumption we check just one ResourceMetrics object, diff --git a/exporter/signalfxexporter/internal/hostmetadata/metadata_test.go b/exporter/signalfxexporter/internal/hostmetadata/metadata_test.go index f1f9ffc9aa6b..cc5519d76a18 100644 --- a/exporter/signalfxexporter/internal/hostmetadata/metadata_test.go +++ b/exporter/signalfxexporter/internal/hostmetadata/metadata_test.go @@ -26,8 +26,8 @@ import ( "github.com/shirou/gopsutil/v3/mem" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" "go.uber.org/zap/zapcore" "go.uber.org/zap/zaptest/observer" @@ -46,7 +46,7 @@ func TestSyncMetadata(t *testing.T) { hostStat host.InfoStat hostStatErr error pushFail bool - metricsData pdata.Metrics + metricsData pmetric.Metrics wantMetadataUpdate []*metadata.MetadataUpdate wantLogs []string }{ @@ -238,7 +238,7 @@ func TestSyncMetadata(t *testing.T) { { name: "empty_metrics_data", pushFail: false, - metricsData: pdata.NewMetrics(), + metricsData: pmetric.NewMetrics(), wantMetadataUpdate: nil, wantLogs: []string{}, }, @@ -305,8 +305,8 @@ func (dc *fakeDimClient) getMetadataUpdates() [][]*metadata.MetadataUpdate { return dc.metadataUpdates } -func generateSampleMetricsData(attrs map[string]string) pdata.Metrics { - m := pdata.NewMetrics() +func generateSampleMetricsData(attrs map[string]string) pmetric.Metrics { + m := pmetric.NewMetrics() rm := m.ResourceMetrics() res := rm.AppendEmpty().Resource() for k, v := range attrs { diff --git a/exporter/signalfxexporter/internal/translation/constants.go b/exporter/signalfxexporter/internal/translation/constants.go index a983274f47db..3c48b8fe3afe 100644 --- a/exporter/signalfxexporter/internal/translation/constants.go +++ b/exporter/signalfxexporter/internal/translation/constants.go @@ -22,7 +22,7 @@ const ( translation_rules: # drops opencensus.resourcetype dimension from metrics generated by receivers written # using OC data structures. This rule can be removed once the k8s_cluster and kubeletstats -# receivers have been refactored to use pdata.Metrics. These dimensions are added as a +# receivers have been refactored to use pmetric.Metrics. These dimensions are added as a # result of the conversion here https://github.com/open-telemetry/opentelemetry-collector/blob/v0.22.0/translator/internaldata/oc_to_resource.go#L128. # Dropping these dimensions will ensure MTSes aren't broken when the receivers are # refactored and this resource type dimension will cease to exist. diff --git a/exporter/signalfxexporter/internal/translation/converter.go b/exporter/signalfxexporter/internal/translation/converter.go index e498d35aa9b7..2d723fe730c6 100644 --- a/exporter/signalfxexporter/internal/translation/converter.go +++ b/exporter/signalfxexporter/internal/translation/converter.go @@ -22,7 +22,8 @@ import ( "unicode" sfxpb "github.com/signalfx/com_signalfx_metrics_protobuf/model" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" "go.uber.org/zap/zapcore" @@ -73,7 +74,7 @@ func NewMetricsConverter( // MetricsToSignalFxV2 converts the passed in MetricsData to SFx datapoints, // returning those datapoints and the number of time series that had to be // dropped because of errors or warnings. 
-func (c *MetricsConverter) MetricsToSignalFxV2(md pdata.Metrics) []*sfxpb.DataPoint { +func (c *MetricsConverter) MetricsToSignalFxV2(md pmetric.Metrics) []*sfxpb.DataPoint { var sfxDataPoints []*sfxpb.DataPoint rms := md.ResourceMetrics() @@ -128,7 +129,7 @@ func filterKeyChars(str string, nonAlphanumericDimChars string) string { // resourceToDimensions will return a set of dimension from the // resource attributes, including a cloud host id (AWSUniqueId, gcp_id, etc.) // if it can be constructed from the provided metadata. -func resourceToDimensions(res pdata.Resource) []*sfxpb.Dimension { +func resourceToDimensions(res pcommon.Resource) []*sfxpb.Dimension { var dims []*sfxpb.Dimension if hostID, ok := splunk.ResourceToHostID(res); ok && hostID.Key != splunk.HostIDKeyHost { @@ -138,7 +139,7 @@ func resourceToDimensions(res pdata.Resource) []*sfxpb.Dimension { }) } - res.Attributes().Range(func(k string, val pdata.Value) bool { + res.Attributes().Range(func(k string, val pcommon.Value) bool { // Never send the SignalFX token if k == splunk.SFxAccessTokenLabel { return true diff --git a/exporter/signalfxexporter/internal/translation/converter_test.go b/exporter/signalfxexporter/internal/translation/converter_test.go index 13eea4a4117c..3aac85a71c0f 100644 --- a/exporter/signalfxexporter/internal/translation/converter_test.go +++ b/exporter/signalfxexporter/internal/translation/converter_test.go @@ -25,8 +25,9 @@ import ( sfxpb "github.com/signalfx/com_signalfx_metrics_protobuf/model" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter/internal/translation/dpfilters" @@ -60,133 +61,133 @@ func Test_MetricDataToSignalFxV2(t *testing.T) { "k2": "v2", } - ts := pdata.NewTimestampFromTime(time.Unix(unixSecs, unixNSecs)) + ts := pcommon.NewTimestampFromTime(time.Unix(unixSecs, unixNSecs)) - initDoublePt := func(doublePt pdata.NumberDataPoint) { + initDoublePt := func(doublePt pmetric.NumberDataPoint) { doublePt.SetTimestamp(ts) doublePt.SetDoubleVal(doubleVal) } - initDoublePtWithLabels := func(doublePtWithLabels pdata.NumberDataPoint) { + initDoublePtWithLabels := func(doublePtWithLabels pmetric.NumberDataPoint) { initDoublePt(doublePtWithLabels) - pdata.NewMapFromRaw(labelMap).CopyTo(doublePtWithLabels.Attributes()) + pcommon.NewMapFromRaw(labelMap).CopyTo(doublePtWithLabels.Attributes()) } - initDoublePtWithLongLabels := func(doublePtWithLabels pdata.NumberDataPoint) { + initDoublePtWithLongLabels := func(doublePtWithLabels pmetric.NumberDataPoint) { initDoublePt(doublePtWithLabels) - pdata.NewMapFromRaw(longLabelMap).CopyTo(doublePtWithLabels.Attributes()) + pcommon.NewMapFromRaw(longLabelMap).CopyTo(doublePtWithLabels.Attributes()) } differentLabelMap := map[string]interface{}{ "k00": "v00", "k11": "v11", } - initDoublePtWithDifferentLabels := func(doublePtWithDifferentLabels pdata.NumberDataPoint) { + initDoublePtWithDifferentLabels := func(doublePtWithDifferentLabels pmetric.NumberDataPoint) { initDoublePt(doublePtWithDifferentLabels) - pdata.NewMapFromRaw(differentLabelMap).CopyTo(doublePtWithDifferentLabels.Attributes()) + pcommon.NewMapFromRaw(differentLabelMap).CopyTo(doublePtWithDifferentLabels.Attributes()) } - initInt64Pt 
:= func(int64Pt pdata.NumberDataPoint) { + initInt64Pt := func(int64Pt pmetric.NumberDataPoint) { int64Pt.SetTimestamp(ts) int64Pt.SetIntVal(int64Val) } - initInt64PtWithLabels := func(int64PtWithLabels pdata.NumberDataPoint) { + initInt64PtWithLabels := func(int64PtWithLabels pmetric.NumberDataPoint) { initInt64Pt(int64PtWithLabels) - pdata.NewMapFromRaw(labelMap).CopyTo(int64PtWithLabels.Attributes()) + pcommon.NewMapFromRaw(labelMap).CopyTo(int64PtWithLabels.Attributes()) } histBounds := []float64{1, 2, 4} histCounts := []uint64{4, 2, 3, 7} - initHistDP := func(histDP pdata.HistogramDataPoint) { + initHistDP := func(histDP pmetric.HistogramDataPoint) { histDP.SetTimestamp(ts) histDP.SetCount(16) histDP.SetSum(100.0) histDP.SetExplicitBounds(histBounds) histDP.SetBucketCounts(histCounts) - pdata.NewMapFromRaw(labelMap).CopyTo(histDP.Attributes()) + pcommon.NewMapFromRaw(labelMap).CopyTo(histDP.Attributes()) } - histDP := pdata.NewHistogramDataPoint() + histDP := pmetric.NewHistogramDataPoint() initHistDP(histDP) - initHistDPNoBuckets := func(histDP pdata.HistogramDataPoint) { + initHistDPNoBuckets := func(histDP pmetric.HistogramDataPoint) { histDP.SetCount(2) histDP.SetSum(10) histDP.SetTimestamp(ts) - pdata.NewMapFromRaw(labelMap).CopyTo(histDP.Attributes()) + pcommon.NewMapFromRaw(labelMap).CopyTo(histDP.Attributes()) } - histDPNoBuckets := pdata.NewHistogramDataPoint() + histDPNoBuckets := pmetric.NewHistogramDataPoint() initHistDPNoBuckets(histDPNoBuckets) tests := []struct { name string - metricsFn func() pdata.Metrics + metricsFn func() pmetric.Metrics excludeMetrics []dpfilters.MetricFilter includeMetrics []dpfilters.MetricFilter wantSfxDataPoints []*sfxpb.DataPoint }{ { name: "nil_node_nil_resources_no_dims", - metricsFn: func() pdata.Metrics { - out := pdata.NewMetrics() + metricsFn: func() pmetric.Metrics { + out := pmetric.NewMetrics() ilm := out.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty() { m := ilm.Metrics().AppendEmpty() m.SetName("gauge_double_with_dims") - m.SetDataType(pdata.MetricDataTypeGauge) + m.SetDataType(pmetric.MetricDataTypeGauge) initDoublePt(m.Gauge().DataPoints().AppendEmpty()) } { m := ilm.Metrics().AppendEmpty() m.SetName("gauge_int_with_dims") - m.SetDataType(pdata.MetricDataTypeGauge) + m.SetDataType(pmetric.MetricDataTypeGauge) initInt64Pt(m.Gauge().DataPoints().AppendEmpty()) } { m := ilm.Metrics().AppendEmpty() m.SetName("cumulative_double_with_dims") - m.SetDataType(pdata.MetricDataTypeSum) + m.SetDataType(pmetric.MetricDataTypeSum) m.Sum().SetIsMonotonic(true) - m.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) initDoublePt(m.Sum().DataPoints().AppendEmpty()) } { m := ilm.Metrics().AppendEmpty() m.SetName("cumulative_int_with_dims") - m.SetDataType(pdata.MetricDataTypeSum) + m.SetDataType(pmetric.MetricDataTypeSum) m.Sum().SetIsMonotonic(true) - m.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) initInt64Pt(m.Sum().DataPoints().AppendEmpty()) } { m := ilm.Metrics().AppendEmpty() m.SetName("delta_double_with_dims") - m.SetDataType(pdata.MetricDataTypeSum) + m.SetDataType(pmetric.MetricDataTypeSum) m.Sum().SetIsMonotonic(true) - m.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityDelta) + m.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityDelta) 
initDoublePt(m.Sum().DataPoints().AppendEmpty()) } { m := ilm.Metrics().AppendEmpty() m.SetName("delta_int_with_dims") - m.SetDataType(pdata.MetricDataTypeSum) + m.SetDataType(pmetric.MetricDataTypeSum) m.Sum().SetIsMonotonic(true) - m.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityDelta) + m.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityDelta) initInt64Pt(m.Sum().DataPoints().AppendEmpty()) } { m := ilm.Metrics().AppendEmpty() m.SetName("gauge_sum_double_with_dims") - m.SetDataType(pdata.MetricDataTypeSum) + m.SetDataType(pmetric.MetricDataTypeSum) m.Sum().SetIsMonotonic(false) initDoublePt(m.Sum().DataPoints().AppendEmpty()) } { m := ilm.Metrics().AppendEmpty() m.SetName("gauge_sum_int_with_dims") - m.SetDataType(pdata.MetricDataTypeSum) + m.SetDataType(pmetric.MetricDataTypeSum) m.Sum().SetIsMonotonic(false) initInt64Pt(m.Sum().DataPoints().AppendEmpty()) } @@ -206,33 +207,33 @@ func Test_MetricDataToSignalFxV2(t *testing.T) { }, { name: "nil_node_and_resources_with_dims", - metricsFn: func() pdata.Metrics { - out := pdata.NewMetrics() + metricsFn: func() pmetric.Metrics { + out := pmetric.NewMetrics() ilm := out.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty() { m := ilm.Metrics().AppendEmpty() m.SetName("gauge_double_with_dims") - m.SetDataType(pdata.MetricDataTypeGauge) + m.SetDataType(pmetric.MetricDataTypeGauge) initDoublePtWithLabels(m.Gauge().DataPoints().AppendEmpty()) } { m := ilm.Metrics().AppendEmpty() m.SetName("gauge_int_with_dims") - m.SetDataType(pdata.MetricDataTypeGauge) + m.SetDataType(pmetric.MetricDataTypeGauge) initInt64PtWithLabels(m.Gauge().DataPoints().AppendEmpty()) } { m := ilm.Metrics().AppendEmpty() m.SetName("cumulative_double_with_dims") - m.SetDataType(pdata.MetricDataTypeSum) + m.SetDataType(pmetric.MetricDataTypeSum) m.Sum().SetIsMonotonic(true) initDoublePtWithLabels(m.Sum().DataPoints().AppendEmpty()) } { m := ilm.Metrics().AppendEmpty() m.SetName("cumulative_int_with_dims") - m.SetDataType(pdata.MetricDataTypeSum) + m.SetDataType(pmetric.MetricDataTypeSum) m.Sum().SetIsMonotonic(true) initInt64PtWithLabels(m.Sum().DataPoints().AppendEmpty()) } @@ -248,8 +249,8 @@ func Test_MetricDataToSignalFxV2(t *testing.T) { }, { name: "with_node_resources_dims", - metricsFn: func() pdata.Metrics { - out := pdata.NewMetrics() + metricsFn: func() pmetric.Metrics { + out := pmetric.NewMetrics() rm := out.ResourceMetrics().AppendEmpty() res := rm.Resource() res.Attributes().InsertString("k/r0", "vr0") @@ -263,13 +264,13 @@ func Test_MetricDataToSignalFxV2(t *testing.T) { { m := ilm.Metrics().AppendEmpty() m.SetName("gauge_double_with_dims") - m.SetDataType(pdata.MetricDataTypeGauge) + m.SetDataType(pmetric.MetricDataTypeGauge) initDoublePtWithLabels(m.Gauge().DataPoints().AppendEmpty()) } { m := ilm.Metrics().AppendEmpty() m.SetName("gauge_int_with_dims") - m.SetDataType(pdata.MetricDataTypeGauge) + m.SetDataType(pmetric.MetricDataTypeGauge) initInt64PtWithLabels(m.Gauge().DataPoints().AppendEmpty()) } @@ -298,8 +299,8 @@ func Test_MetricDataToSignalFxV2(t *testing.T) { }, { name: "with_node_resources_dims - long metric name", - metricsFn: func() pdata.Metrics { - out := pdata.NewMetrics() + metricsFn: func() pmetric.Metrics { + out := pmetric.NewMetrics() rm := out.ResourceMetrics().AppendEmpty() res := rm.Resource() res.Attributes().InsertString("k/r0", "vr0") @@ -313,31 +314,31 @@ func Test_MetricDataToSignalFxV2(t *testing.T) { { m := ilm.Metrics().AppendEmpty() m.SetName(fmt.Sprintf("l%sng_name", 
strings.Repeat("o", 256))) - m.SetDataType(pdata.MetricDataTypeGauge) + m.SetDataType(pmetric.MetricDataTypeGauge) initDoublePtWithLabels(m.Gauge().DataPoints().AppendEmpty()) } { m := ilm.Metrics().AppendEmpty() m.SetName("gauge_int_with_dims") - m.SetDataType(pdata.MetricDataTypeGauge) + m.SetDataType(pmetric.MetricDataTypeGauge) initInt64PtWithLabels(m.Gauge().DataPoints().AppendEmpty()) } { m := ilm.Metrics().AppendEmpty() m.SetName(fmt.Sprintf("l%sng_name", strings.Repeat("o", 256))) - m.SetDataType(pdata.MetricDataTypeGauge) + m.SetDataType(pmetric.MetricDataTypeGauge) initDoublePtWithLabels(m.Gauge().DataPoints().AppendEmpty()) } { m := ilm.Metrics().AppendEmpty() m.SetName(fmt.Sprintf("l%sng_name", strings.Repeat("o", 256))) - m.SetDataType(pdata.MetricDataTypeGauge) + m.SetDataType(pmetric.MetricDataTypeGauge) initDoublePtWithLabels(m.Gauge().DataPoints().AppendEmpty()) } { m := ilm.Metrics().AppendEmpty() m.SetName("gauge_int_with_dims") - m.SetDataType(pdata.MetricDataTypeGauge) + m.SetDataType(pmetric.MetricDataTypeGauge) initInt64PtWithLabels(m.Gauge().DataPoints().AppendEmpty()) } @@ -366,8 +367,8 @@ func Test_MetricDataToSignalFxV2(t *testing.T) { }, { name: "with_node_resources_dims - long dimension name/value", - metricsFn: func() pdata.Metrics { - out := pdata.NewMetrics() + metricsFn: func() pmetric.Metrics { + out := pmetric.NewMetrics() rm := out.ResourceMetrics().AppendEmpty() res := rm.Resource() res.Attributes().InsertString("k/r0", "vr0") @@ -381,7 +382,7 @@ func Test_MetricDataToSignalFxV2(t *testing.T) { { m := ilm.Metrics().AppendEmpty() m.SetName("gauge_double_with_dims") - m.SetDataType(pdata.MetricDataTypeGauge) + m.SetDataType(pmetric.MetricDataTypeGauge) initDoublePtWithLongLabels(m.Gauge().DataPoints().AppendEmpty()) } @@ -404,8 +405,8 @@ func Test_MetricDataToSignalFxV2(t *testing.T) { }, { name: "with_resources_cloud_partial_aws_dim", - metricsFn: func() pdata.Metrics { - out := pdata.NewMetrics() + metricsFn: func() pmetric.Metrics { + out := pmetric.NewMetrics() rm := out.ResourceMetrics().AppendEmpty() res := rm.Resource() res.Attributes().InsertString("k/r0", "vr0") @@ -417,7 +418,7 @@ func Test_MetricDataToSignalFxV2(t *testing.T) { ilm := rm.ScopeMetrics().AppendEmpty() m := ilm.Metrics().AppendEmpty() m.SetName("gauge_double_with_dims") - m.SetDataType(pdata.MetricDataTypeGauge) + m.SetDataType(pmetric.MetricDataTypeGauge) initDoublePtWithLabels(m.Gauge().DataPoints().AppendEmpty()) return out @@ -437,8 +438,8 @@ func Test_MetricDataToSignalFxV2(t *testing.T) { }, { name: "with_resources_cloud_aws_dim", - metricsFn: func() pdata.Metrics { - out := pdata.NewMetrics() + metricsFn: func() pmetric.Metrics { + out := pmetric.NewMetrics() rm := out.ResourceMetrics().AppendEmpty() res := rm.Resource() res.Attributes().InsertString("k/r0", "vr0") @@ -451,7 +452,7 @@ func Test_MetricDataToSignalFxV2(t *testing.T) { ilm := rm.ScopeMetrics().AppendEmpty() m := ilm.Metrics().AppendEmpty() m.SetName("gauge_double_with_dims") - m.SetDataType(pdata.MetricDataTypeGauge) + m.SetDataType(pmetric.MetricDataTypeGauge) initDoublePtWithLabels(m.Gauge().DataPoints().AppendEmpty()) return out @@ -473,8 +474,8 @@ func Test_MetricDataToSignalFxV2(t *testing.T) { }, { name: "with_resources_cloud_gcp_dim_partial", - metricsFn: func() pdata.Metrics { - out := pdata.NewMetrics() + metricsFn: func() pmetric.Metrics { + out := pmetric.NewMetrics() rm := out.ResourceMetrics().AppendEmpty() res := rm.Resource() res.Attributes().InsertString("k/r0", "vr0") @@ -485,7 +486,7 @@ func 
Test_MetricDataToSignalFxV2(t *testing.T) { ilm := rm.ScopeMetrics().AppendEmpty() m := ilm.Metrics().AppendEmpty() m.SetName("gauge_double_with_dims") - m.SetDataType(pdata.MetricDataTypeGauge) + m.SetDataType(pmetric.MetricDataTypeGauge) initDoublePtWithLabels(m.Gauge().DataPoints().AppendEmpty()) return out @@ -504,8 +505,8 @@ func Test_MetricDataToSignalFxV2(t *testing.T) { }, { name: "with_resources_cloud_gcp_dim", - metricsFn: func() pdata.Metrics { - out := pdata.NewMetrics() + metricsFn: func() pmetric.Metrics { + out := pmetric.NewMetrics() rm := out.ResourceMetrics().AppendEmpty() res := rm.Resource() res.Attributes().InsertString("k/r0", "vr0") @@ -517,7 +518,7 @@ func Test_MetricDataToSignalFxV2(t *testing.T) { ilm := rm.ScopeMetrics().AppendEmpty() m := ilm.Metrics().AppendEmpty() m.SetName("gauge_double_with_dims") - m.SetDataType(pdata.MetricDataTypeGauge) + m.SetDataType(pmetric.MetricDataTypeGauge) initDoublePtWithLabels(m.Gauge().DataPoints().AppendEmpty()) return out @@ -538,26 +539,26 @@ func Test_MetricDataToSignalFxV2(t *testing.T) { }, { name: "with_exclude_metrics_filter", - metricsFn: func() pdata.Metrics { - out := pdata.NewMetrics() + metricsFn: func() pmetric.Metrics { + out := pmetric.NewMetrics() ilm := out.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty() { m := ilm.Metrics().AppendEmpty() m.SetName("gauge_double_with_dims") - m.SetDataType(pdata.MetricDataTypeGauge) + m.SetDataType(pmetric.MetricDataTypeGauge) initDoublePtWithLabels(m.Gauge().DataPoints().AppendEmpty()) } { m := ilm.Metrics().AppendEmpty() m.SetName("gauge_int_with_dims") - m.SetDataType(pdata.MetricDataTypeGauge) + m.SetDataType(pmetric.MetricDataTypeGauge) initInt64PtWithLabels(m.Gauge().DataPoints().AppendEmpty()) } { m := ilm.Metrics().AppendEmpty() m.SetName("cumulative_double_with_dims") - m.SetDataType(pdata.MetricDataTypeSum) + m.SetDataType(pmetric.MetricDataTypeSum) m.Sum().SetIsMonotonic(true) initDoublePtWithLabels(m.Sum().DataPoints().AppendEmpty()) initDoublePtWithDifferentLabels(m.Sum().DataPoints().AppendEmpty()) @@ -565,7 +566,7 @@ func Test_MetricDataToSignalFxV2(t *testing.T) { { m := ilm.Metrics().AppendEmpty() m.SetName("cumulative_int_with_dims") - m.SetDataType(pdata.MetricDataTypeSum) + m.SetDataType(pmetric.MetricDataTypeSum) m.Sum().SetIsMonotonic(true) initInt64PtWithLabels(m.Sum().DataPoints().AppendEmpty()) } @@ -600,26 +601,26 @@ func Test_MetricDataToSignalFxV2(t *testing.T) { { // To validate that filters in include serves as override to the ones in exclude list. 
name: "with_include_and_exclude_metrics_filter", - metricsFn: func() pdata.Metrics { - out := pdata.NewMetrics() + metricsFn: func() pmetric.Metrics { + out := pmetric.NewMetrics() ilm := out.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty() { m := ilm.Metrics().AppendEmpty() m.SetName("gauge_double_with_dims") - m.SetDataType(pdata.MetricDataTypeGauge) + m.SetDataType(pmetric.MetricDataTypeGauge) initDoublePtWithLabels(m.Gauge().DataPoints().AppendEmpty()) } { m := ilm.Metrics().AppendEmpty() m.SetName("gauge_int_with_dims") - m.SetDataType(pdata.MetricDataTypeGauge) + m.SetDataType(pmetric.MetricDataTypeGauge) initInt64PtWithLabels(m.Gauge().DataPoints().AppendEmpty()) } { m := ilm.Metrics().AppendEmpty() m.SetName("cumulative_double_with_dims") - m.SetDataType(pdata.MetricDataTypeSum) + m.SetDataType(pmetric.MetricDataTypeSum) m.Sum().SetIsMonotonic(true) initDoublePtWithLabels(m.Sum().DataPoints().AppendEmpty()) initDoublePtWithDifferentLabels(m.Sum().DataPoints().AppendEmpty()) @@ -627,7 +628,7 @@ func Test_MetricDataToSignalFxV2(t *testing.T) { { m := ilm.Metrics().AppendEmpty() m.SetName("cumulative_int_with_dims") - m.SetDataType(pdata.MetricDataTypeSum) + m.SetDataType(pmetric.MetricDataTypeSum) m.Sum().SetIsMonotonic(true) initInt64PtWithLabels(m.Sum().DataPoints().AppendEmpty()) } @@ -692,9 +693,9 @@ func TestMetricDataToSignalFxV2WithTranslation(t *testing.T) { }, 1) require.NoError(t, err) - md := pdata.NewMetrics() + md := pmetric.NewMetrics() m := md.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics().AppendEmpty() - m.SetDataType(pdata.MetricDataTypeGauge) + m.SetDataType(pmetric.MetricDataTypeGauge) m.SetName("metric1") dp := m.Gauge().DataPoints().AppendEmpty() dp.SetIntVal(123) @@ -732,9 +733,9 @@ func TestDimensionKeyCharsWithPeriod(t *testing.T) { }, 1) require.NoError(t, err) - md := pdata.NewMetrics() + md := pmetric.NewMetrics() m := md.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics().AppendEmpty() - m.SetDataType(pdata.MetricDataTypeGauge) + m.SetDataType(pmetric.MetricDataTypeGauge) m.SetName("metric1") dp := m.Gauge().DataPoints().AppendEmpty() dp.SetIntVal(123) diff --git a/exporter/signalfxexporter/internal/translation/logdata_to_signalfxv2.go b/exporter/signalfxexporter/internal/translation/logdata_to_signalfxv2.go index eb799a8aca08..a3a0c57b06a4 100644 --- a/exporter/signalfxexporter/internal/translation/logdata_to_signalfxv2.go +++ b/exporter/signalfxexporter/internal/translation/logdata_to_signalfxv2.go @@ -18,7 +18,8 @@ import ( "fmt" sfxpb "github.com/signalfx/com_signalfx_metrics_protobuf/model" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk" @@ -26,8 +27,8 @@ import ( func LogRecordSliceToSignalFxV2( logger *zap.Logger, - logs pdata.LogRecordSlice, - resourceAttrs pdata.Map, + logs plog.LogRecordSlice, + resourceAttrs pcommon.Map, ) ([]*sfxpb.Event, int) { events := make([]*sfxpb.Event, 0, logs.Len()) numDroppedLogRecords := 0 @@ -45,7 +46,7 @@ func LogRecordSliceToSignalFxV2( return events, numDroppedLogRecords } -func convertLogRecord(lr pdata.LogRecord, resourceAttrs pdata.Map, logger *zap.Logger) (*sfxpb.Event, bool) { +func convertLogRecord(lr plog.LogRecord, resourceAttrs pcommon.Map, logger *zap.Logger) (*sfxpb.Event, bool) { attrs := lr.Attributes() categoryVal, ok := 
attrs.Get(splunk.SFxEventCategoryKey) @@ -55,13 +56,13 @@ func convertLogRecord(lr pdata.LogRecord, resourceAttrs pdata.Map, logger *zap.L var event sfxpb.Event - if categoryVal.Type() == pdata.ValueTypeInt { + if categoryVal.Type() == pcommon.ValueTypeInt { asCat := sfxpb.EventCategory(categoryVal.IntVal()) event.Category = &asCat } - if mapVal, ok := attrs.Get(splunk.SFxEventPropertiesKey); ok && mapVal.Type() == pdata.ValueTypeMap { - mapVal.MapVal().Range(func(k string, v pdata.Value) bool { + if mapVal, ok := attrs.Get(splunk.SFxEventPropertiesKey); ok && mapVal.Type() == pcommon.ValueTypeMap { + mapVal.MapVal().Range(func(k string, v pcommon.Value) bool { val, err := attributeValToPropertyVal(v) if err != nil { logger.Debug("Failed to convert log record property value to SignalFx property value", zap.Error(err), zap.String("key", k)) @@ -78,8 +79,8 @@ func convertLogRecord(lr pdata.LogRecord, resourceAttrs pdata.Map, logger *zap.L // keep a record of Resource attributes to add as dimensions // so as not to modify LogRecord attributes - resourceAttrsForDimensions := pdata.NewMap() - resourceAttrs.Range(func(k string, v pdata.Value) bool { + resourceAttrsForDimensions := pcommon.NewMap() + resourceAttrs.Range(func(k string, v pcommon.Value) bool { // LogRecord attribute takes priority if _, ok := attrs.Get(k); !ok { resourceAttrsForDimensions.Insert(k, v) @@ -87,7 +88,7 @@ func convertLogRecord(lr pdata.LogRecord, resourceAttrs pdata.Map, logger *zap.L return true }) - addDimension := func(k string, v pdata.Value) bool { + addDimension := func(k string, v pcommon.Value) bool { // Skip internal attributes switch k { case splunk.SFxEventCategoryKey: @@ -95,13 +96,13 @@ func convertLogRecord(lr pdata.LogRecord, resourceAttrs pdata.Map, logger *zap.L case splunk.SFxEventPropertiesKey: return true case splunk.SFxEventType: - if v.Type() == pdata.ValueTypeString { + if v.Type() == pcommon.ValueTypeString { event.EventType = v.StringVal() } return true } - if v.Type() != pdata.ValueTypeString { + if v.Type() != pcommon.ValueTypeString { logger.Debug("Failed to convert log record or resource attribute value to SignalFx property value, key is not a string", zap.String("key", k)) return true } @@ -123,19 +124,19 @@ func convertLogRecord(lr pdata.LogRecord, resourceAttrs pdata.Map, logger *zap.L return &event, true } -func attributeValToPropertyVal(v pdata.Value) (*sfxpb.PropertyValue, error) { +func attributeValToPropertyVal(v pcommon.Value) (*sfxpb.PropertyValue, error) { var val sfxpb.PropertyValue switch v.Type() { - case pdata.ValueTypeInt: + case pcommon.ValueTypeInt: asInt := v.IntVal() val.IntValue = &asInt - case pdata.ValueTypeBool: + case pcommon.ValueTypeBool: asBool := v.BoolVal() val.BoolValue = &asBool - case pdata.ValueTypeDouble: + case pcommon.ValueTypeDouble: asDouble := v.DoubleVal() val.DoubleValue = &asDouble - case pdata.ValueTypeString: + case pcommon.ValueTypeString: asString := v.StringVal() val.StrValue = &asString default: diff --git a/exporter/signalfxexporter/internal/translation/logdata_to_signalfxv2_test.go b/exporter/signalfxexporter/internal/translation/logdata_to_signalfxv2_test.go index 1f5b06eec056..d3ed4f6b4969 100644 --- a/exporter/signalfxexporter/internal/translation/logdata_to_signalfxv2_test.go +++ b/exporter/signalfxexporter/internal/translation/logdata_to_signalfxv2_test.go @@ -23,7 +23,8 @@ import ( sfxpb "github.com/signalfx/com_signalfx_metrics_protobuf/model" "github.com/stretchr/testify/assert" - 
"go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" "go.uber.org/zap" ) @@ -51,8 +52,8 @@ func TestLogDataToSignalFxEvents(t *testing.T) { } } - buildDefaultLogs := func() pdata.Logs { - logs := pdata.NewLogs() + buildDefaultLogs := func() plog.Logs { + logs := plog.NewLogs() resourceLogs := logs.ResourceLogs() resourceLog := resourceLogs.AppendEmpty() resourceLog.Resource().Attributes().InsertString("k0", "should use ILL attr value instead") @@ -63,14 +64,14 @@ func TestLogDataToSignalFxEvents(t *testing.T) { logSlice := ilLogs.AppendEmpty().LogRecords() l := logSlice.AppendEmpty() - l.SetTimestamp(pdata.NewTimestampFromTime(now.Truncate(time.Millisecond))) + l.SetTimestamp(pcommon.NewTimestampFromTime(now.Truncate(time.Millisecond))) attrs := l.Attributes() attrs.InsertString("k0", "v0") attrs.InsertString("k1", "v1") attrs.InsertString("k2", "v2") - propMapVal := pdata.NewValueMap() + propMapVal := pcommon.NewValueMap() propMap := propMapVal.MapVal() propMap.InsertString("env", "prod") propMap.InsertBool("isActive", true) @@ -78,8 +79,8 @@ func TestLogDataToSignalFxEvents(t *testing.T) { propMap.InsertDouble("temp", 40.5) propMap.Sort() attrs.Insert("com.splunk.signalfx.event_properties", propMapVal) - attrs.Insert("com.splunk.signalfx.event_category", pdata.NewValueInt(int64(sfxpb.EventCategory_USER_DEFINED))) - attrs.Insert("com.splunk.signalfx.event_type", pdata.NewValueString("shutdown")) + attrs.Insert("com.splunk.signalfx.event_category", pcommon.NewValueInt(int64(sfxpb.EventCategory_USER_DEFINED))) + attrs.Insert("com.splunk.signalfx.event_type", pcommon.NewValueString("shutdown")) l.Attributes().Sort() @@ -89,7 +90,7 @@ func TestLogDataToSignalFxEvents(t *testing.T) { tests := []struct { name string sfxEvents []*sfxpb.Event - logData pdata.Logs + logData plog.Logs numDropped int }{ { @@ -104,10 +105,10 @@ func TestLogDataToSignalFxEvents(t *testing.T) { e.Category = nil return []*sfxpb.Event{e} }(), - logData: func() pdata.Logs { + logData: func() plog.Logs { logs := buildDefaultLogs() lrs := logs.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords() - lrs.At(0).Attributes().Upsert("com.splunk.signalfx.event_category", pdata.NewValueEmpty()) + lrs.At(0).Attributes().Upsert("com.splunk.signalfx.event_category", pcommon.NewValueEmpty()) return logs }(), }, diff --git a/exporter/signalfxexporter/internal/translation/translator_test.go b/exporter/signalfxexporter/internal/translation/translator_test.go index 162e6c04a380..c4956efae1dd 100644 --- a/exporter/signalfxexporter/internal/translation/translator_test.go +++ b/exporter/signalfxexporter/internal/translation/translator_test.go @@ -24,7 +24,8 @@ import ( sfxpb "github.com/signalfx/com_signalfx_metrics_protobuf/model" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" "go.uber.org/zap/zaptest/observer" ) @@ -2538,19 +2539,19 @@ func TestDeltaTranslatorNoMatchingMapping(t *testing.T) { func TestDeltaTranslatorMismatchedValueTypes(t *testing.T) { c := testConverter(t, map[string]string{"system.cpu.time": "system.cpu.delta"}) md1 := baseMD() - md1.SetDataType(pdata.MetricDataTypeSum) + md1.SetDataType(pmetric.MetricDataTypeSum) intTS("cpu0", "user", 1, 1, 1, md1.Sum().DataPoints().AppendEmpty()) _ = c.MetricsToSignalFxV2(wrapMetric(md1)) md2 := 
baseMD() - md2.SetDataType(pdata.MetricDataTypeSum) + md2.SetDataType(pmetric.MetricDataTypeSum) dblTS("cpu0", "user", 1, 1, 1, md2.Sum().DataPoints().AppendEmpty()) pts := c.MetricsToSignalFxV2(wrapMetric(md2)) idx := indexPts(pts) require.Equal(t, 1, len(idx)) } -func requireDeltaMetricOk(t *testing.T, md1, md2, md3 pdata.Metrics) ( +func requireDeltaMetricOk(t *testing.T, md1, md2, md3 pmetric.Metrics) ( []*sfxpb.DataPoint, []*sfxpb.DataPoint, ) { c := testConverter(t, map[string]string{"system.cpu.time": "system.cpu.delta"}) @@ -2980,9 +2981,9 @@ func indexPts(pts []*sfxpb.DataPoint) map[string][]*sfxpb.DataPoint { return m } -func doubleMD(secondsDelta int64, valueDelta float64) pdata.Metrics { +func doubleMD(secondsDelta int64, valueDelta float64) pmetric.Metrics { md := baseMD() - md.SetDataType(pdata.MetricDataTypeSum) + md.SetDataType(pmetric.MetricDataTypeSum) ms := md.Sum() dblTS("cpu0", "user", secondsDelta, 100, valueDelta, ms.DataPoints().AppendEmpty()) dblTS("cpu0", "system", secondsDelta, 200, valueDelta, ms.DataPoints().AppendEmpty()) @@ -2994,9 +2995,9 @@ func doubleMD(secondsDelta int64, valueDelta float64) pdata.Metrics { return wrapMetric(md) } -func intMD(secondsDelta int64, valueDelta int64) pdata.Metrics { +func intMD(secondsDelta int64, valueDelta int64) pmetric.Metrics { md := baseMD() - md.SetDataType(pdata.MetricDataTypeSum) + md.SetDataType(pmetric.MetricDataTypeSum) ms := md.Sum() intTS("cpu0", "user", secondsDelta, 100, valueDelta, ms.DataPoints().AppendEmpty()) intTS("cpu0", "system", secondsDelta, 200, valueDelta, ms.DataPoints().AppendEmpty()) @@ -3008,9 +3009,9 @@ func intMD(secondsDelta int64, valueDelta int64) pdata.Metrics { return wrapMetric(md) } -func intMDAfterReset(secondsDelta int64, valueDelta int64) pdata.Metrics { +func intMDAfterReset(secondsDelta int64, valueDelta int64) pmetric.Metrics { md := baseMD() - md.SetDataType(pdata.MetricDataTypeSum) + md.SetDataType(pmetric.MetricDataTypeSum) ms := md.Sum() intTS("cpu0", "user", secondsDelta, 0, valueDelta, ms.DataPoints().AppendEmpty()) intTS("cpu0", "system", secondsDelta, 0, valueDelta, ms.DataPoints().AppendEmpty()) @@ -3022,31 +3023,31 @@ func intMDAfterReset(secondsDelta int64, valueDelta int64) pdata.Metrics { return wrapMetric(md) } -func baseMD() pdata.Metric { - out := pdata.NewMetric() +func baseMD() pmetric.Metric { + out := pmetric.NewMetric() out.SetName("system.cpu.time") out.SetUnit("s") return out } -func dblTS(lbl0 string, lbl1 string, secondsDelta int64, v float64, valueDelta float64, out pdata.NumberDataPoint) { +func dblTS(lbl0 string, lbl1 string, secondsDelta int64, v float64, valueDelta float64, out pmetric.NumberDataPoint) { out.Attributes().InsertString("cpu", lbl0) out.Attributes().InsertString("state", lbl1) const startTime = 1600000000 - out.SetTimestamp(pdata.Timestamp(time.Duration(startTime+secondsDelta) * time.Second)) + out.SetTimestamp(pcommon.Timestamp(time.Duration(startTime+secondsDelta) * time.Second)) out.SetDoubleVal(v + valueDelta) } -func intTS(lbl0 string, lbl1 string, secondsDelta int64, v int64, valueDelta int64, out pdata.NumberDataPoint) { +func intTS(lbl0 string, lbl1 string, secondsDelta int64, v int64, valueDelta int64, out pmetric.NumberDataPoint) { out.Attributes().InsertString("cpu", lbl0) out.Attributes().InsertString("state", lbl1) const startTime = 1600000000 - out.SetTimestamp(pdata.Timestamp(time.Duration(startTime+secondsDelta) * time.Second)) + out.SetTimestamp(pcommon.Timestamp(time.Duration(startTime+secondsDelta) * time.Second)) 
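// Illustrative aside, not part of the patch: the delta-translator helpers above now build
// cumulative sums from pmetric and stamp points with pcommon.Timestamp. A minimal sketch of
// that pattern under the same assumptions; the function name and values are hypothetical,
// the calls are the ones used in translator_test.go above.
package fixtures

import (
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"
)

func cumulativeCPUTime(secondsDelta int64, value float64) pmetric.Metric {
	md := pmetric.NewMetric() // was pdata.NewMetric()
	md.SetName("system.cpu.time")
	md.SetUnit("s")
	md.SetDataType(pmetric.MetricDataTypeSum) // was pdata.MetricDataTypeSum
	md.Sum().SetIsMonotonic(true)
	dp := md.Sum().DataPoints().AppendEmpty()
	dp.Attributes().InsertString("cpu", "cpu0")
	dp.Attributes().InsertString("state", "user")
	const startTime = 1600000000
	dp.SetTimestamp(pcommon.Timestamp(time.Duration(startTime+secondsDelta) * time.Second)) // was pdata.Timestamp
	dp.SetDoubleVal(value)
	return md
}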
out.SetIntVal(v + valueDelta) } -func wrapMetric(m pdata.Metric) pdata.Metrics { - out := pdata.NewMetrics() +func wrapMetric(m pmetric.Metric) pmetric.Metrics { + out := pmetric.NewMetrics() m.CopyTo(out.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics().AppendEmpty()) return out } diff --git a/exporter/skywalkingexporter/go.mod b/exporter/skywalkingexporter/go.mod index b21afe8f8118..fa2f3fe029e2 100644 --- a/exporter/skywalkingexporter/go.mod +++ b/exporter/skywalkingexporter/go.mod @@ -6,14 +6,15 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.48.0 github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.48.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 google.golang.org/grpc v1.45.0 skywalking.apache.org/repo/goapi v0.0.0-20211122071111-ffc517fbfe21 ) require ( - github.com/cenkalti/backoff/v4 v4.1.2 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/go-logr/logr v1.2.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect @@ -21,14 +22,13 @@ require ( github.com/golang/protobuf v1.5.2 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/klauspost/compress v1.15.1 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/mostynb/go-grpc-compression v1.1.16 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rogpeppe/go-internal v1.8.0 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect @@ -37,8 +37,8 @@ require ( go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect go.uber.org/zap v1.21.0 // indirect - golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect + golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 // indirect golang.org/x/text v0.3.7 // indirect google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect google.golang.org/protobuf v1.28.0 // indirect @@ -49,3 +49,5 @@ require ( replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/common => ../../internal/common replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal => ../../internal/coreinternal + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/exporter/skywalkingexporter/go.sum b/exporter/skywalkingexporter/go.sum index 0898862bd66b..ad88947fda0e 100644 --- a/exporter/skywalkingexporter/go.sum +++ b/exporter/skywalkingexporter/go.sum @@ -20,8 +20,8 @@ 
github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= -github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -97,7 +97,6 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -131,8 +130,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/klauspost/compress v1.14.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= @@ -188,8 +187,6 @@ github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFo github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/schollz/progressbar/v2 v2.13.2/go.mod h1:6YZjqdthH6SCZKv2rqGryrxPtfmRB/DWZxSMfCXPyD8= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag 
v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -205,10 +202,12 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0/go.mod h1:SY9qHHUES6W3oZnO1H2W8NvsSovIoXRg/A1AH9px8+I= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= @@ -217,7 +216,7 @@ go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.1/go.mod h1:RkFRM1m0puWIq10oxImnGEduNBzxiN7TXluRBtE+5j0= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= @@ -259,8 +258,8 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= 
+golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -288,8 +287,8 @@ golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= diff --git a/exporter/skywalkingexporter/logrecord_to_logdata.go b/exporter/skywalkingexporter/logrecord_to_logdata.go index 84146a8e49ad..891910756cf6 100644 --- a/exporter/skywalkingexporter/logrecord_to_logdata.go +++ b/exporter/skywalkingexporter/logrecord_to_logdata.go @@ -18,8 +18,9 @@ import ( "strconv" "time" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" common "skywalking.apache.org/repo/goapi/collect/common/v3" logpb "skywalking.apache.org/repo/goapi/collect/logging/v3" ) @@ -34,7 +35,7 @@ const ( defaultServiceName = "otel-collector" ) -func logRecordToLogData(ld pdata.Logs) []*logpb.LogData { +func logRecordToLogData(ld plog.Logs) []*logpb.LogData { lds := make([]*logpb.LogData, 0) rls := ld.ResourceLogs() for i := 0; i < rls.Len(); i++ { @@ -57,7 +58,7 @@ func logRecordToLogData(ld pdata.Logs) []*logpb.LogData { return lds } -func resourceToLogData(resource pdata.Resource, logData *logpb.LogData) { +func resourceToLogData(resource pcommon.Resource, logData *logpb.LogData) { attrs := resource.Attributes() if serviceName, ok := attrs.Get(conventions.AttributeServiceName); ok { @@ -70,7 +71,7 @@ func resourceToLogData(resource pdata.Resource, logData *logpb.LogData) { logData.ServiceInstance = serviceInstanceID.AsString() } - attrs.Range(func(k string, v pdata.Value) bool { + attrs.Range(func(k string, v pcommon.Value) bool { logData.Tags.Data = append(logData.Tags.Data, &common.KeyStringValuePair{ Key: k, Value: v.AsString(), @@ -79,7 +80,7 @@ func resourceToLogData(resource pdata.Resource, logData *logpb.LogData) { }) } -func instrumentationLibraryToLogData(instrumentationLibrary pdata.InstrumentationScope, logData *logpb.LogData) { +func 
instrumentationLibraryToLogData(instrumentationLibrary pcommon.InstrumentationScope, logData *logpb.LogData) { if nameValue := instrumentationLibrary.Name(); nameValue != "" { logData.Tags.Data = append(logData.Tags.Data, &common.KeyStringValuePair{ Key: instrumentationName, @@ -94,8 +95,8 @@ func instrumentationLibraryToLogData(instrumentationLibrary pdata.Instrumentatio } } -func mapLogRecordToLogData(lr pdata.LogRecord, logData *logpb.LogData) { - if lr.Body().Type() == pdata.ValueTypeEmpty { +func mapLogRecordToLogData(lr plog.LogRecord, logData *logpb.LogData) { + if lr.Body().Type() == pcommon.ValueTypeEmpty { return } @@ -117,7 +118,7 @@ func mapLogRecordToLogData(lr pdata.LogRecord, logData *logpb.LogData) { }) } - lr.Attributes().Range(func(k string, v pdata.Value) bool { + lr.Attributes().Range(func(k string, v pcommon.Value) bool { logData.Tags.Data = append(logData.Tags.Data, &common.KeyStringValuePair{ Key: k, Value: v.AsString(), diff --git a/exporter/skywalkingexporter/logrecord_to_logdata_test.go b/exporter/skywalkingexporter/logrecord_to_logdata_test.go index e9fce1577282..1288dc6bb567 100644 --- a/exporter/skywalkingexporter/logrecord_to_logdata_test.go +++ b/exporter/skywalkingexporter/logrecord_to_logdata_test.go @@ -19,24 +19,25 @@ import ( "time" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" logpb "skywalking.apache.org/repo/goapi/collect/logging/v3" ) -func getComplexAttributeValueMap() pdata.Value { - mapVal := pdata.NewValueMap() +func getComplexAttributeValueMap() pcommon.Value { + mapVal := pcommon.NewValueMap() mapValReal := mapVal.MapVal() mapValReal.InsertBool("result", true) mapValReal.InsertString("status", "ok") mapValReal.InsertDouble("value", 1.3) mapValReal.InsertInt("code", 200) mapValReal.InsertNull("null") - arrayVal := pdata.NewValueSlice() + arrayVal := pcommon.NewValueSlice() arrayVal.SliceVal().AppendEmpty().SetStringVal("array") mapValReal.Insert("array", arrayVal) - subMapVal := pdata.NewValueMap() + subMapVal := pcommon.NewValueMap() subMapVal.MapVal().InsertString("data", "hello world") mapValReal.Insert("map", subMapVal) @@ -44,8 +45,8 @@ func getComplexAttributeValueMap() pdata.Value { return mapVal } -func createLogData(numberOfLogs int) pdata.Logs { - logs := pdata.NewLogs() +func createLogData(numberOfLogs int) plog.Logs { + logs := plog.NewLogs() logs.ResourceLogs().AppendEmpty() rl := logs.ResourceLogs().AppendEmpty() rl.Resource().Attributes().InsertString("resourceKey", "resourceValue") @@ -57,13 +58,13 @@ func createLogData(numberOfLogs int) pdata.Logs { sl.Scope().SetVersion("v0.1.0") for i := 0; i < numberOfLogs; i++ { - ts := pdata.Timestamp(int64(i) * time.Millisecond.Nanoseconds()) + ts := pcommon.Timestamp(int64(i) * time.Millisecond.Nanoseconds()) logRecord := sl.LogRecords().AppendEmpty() - logRecord.SetTraceID(pdata.NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1})) - logRecord.SetSpanID(pdata.NewSpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8})) + logRecord.SetTraceID(pcommon.NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1})) + logRecord.SetSpanID(pcommon.NewSpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8})) logRecord.SetFlags(uint32(0x01)) logRecord.SetSeverityText("INFO") - logRecord.SetSeverityNumber(pdata.SeverityNumberINFO) + logRecord.SetSeverityNumber(plog.SeverityNumberINFO) 
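// Illustrative aside, not part of the patch: log fixtures in these SkyWalking tests now come
// from plog, with IDs, timestamps, and attribute values from pcommon. A minimal sketch of the
// pattern; the helper name is hypothetical, the calls mirror createLogData above.
package fixtures

import (
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/plog"
)

func buildLogFixture() plog.Logs {
	logs := plog.NewLogs() // was pdata.NewLogs()
	sl := logs.ResourceLogs().AppendEmpty().ScopeLogs().AppendEmpty()
	lr := sl.LogRecords().AppendEmpty()
	lr.SetTraceID(pcommon.NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 8, 7, 6, 5, 4, 3, 2, 1})) // was pdata.NewTraceID
	lr.SetSpanID(pcommon.NewSpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8}))                            // was pdata.NewSpanID
	lr.SetSeverityText("INFO")
	lr.SetSeverityNumber(plog.SeverityNumberINFO)                     // was pdata.SeverityNumberINFO
	lr.SetTimestamp(pcommon.Timestamp(time.Millisecond.Nanoseconds())) // was pdata.Timestamp
	lr.Body().SetStringVal("log contents")
	lr.Attributes().InsertString("resourceKey", "resourceValue")
	return logs
}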
logRecord.SetTimestamp(ts) switch i { case 0: @@ -80,7 +81,7 @@ func createLogData(numberOfLogs int) pdata.Logs { logRecord.Attributes().Insert("map-value", getComplexAttributeValueMap()) logRecord.Body().SetStringVal("log contents") case 6: - arrayVal := pdata.NewValueSlice() + arrayVal := pcommon.NewValueSlice() arrayVal.SliceVal().AppendEmpty().SetStringVal("array") logRecord.Attributes().Insert("array-value", arrayVal) logRecord.Body().SetStringVal("log contents") @@ -105,7 +106,7 @@ func TestLogsDataToLogService(t *testing.T) { assert.Equal(t, searchLogTag(flags, log), "1") assert.Equal(t, searchLogTag(severityText, log), "INFO") assert.Equal(t, searchLogTag(severityNumber, log), "9") - assert.Equal(t, log.Timestamp, pdata.Timestamp(int64(i)*time.Millisecond.Nanoseconds()).AsTime().UnixMilli()) + assert.Equal(t, log.Timestamp, pcommon.Timestamp(int64(i)*time.Millisecond.Nanoseconds()).AsTime().UnixMilli()) if i == 1 { assert.Equal(t, log.GetBody().GetText().GetText(), "true") } else if i == 2 { diff --git a/exporter/skywalkingexporter/metricrecord_to_metricdata.go b/exporter/skywalkingexporter/metricrecord_to_metricdata.go index f5d2ea8c7631..c295a4f73d7b 100644 --- a/exporter/skywalkingexporter/metricrecord_to_metricdata.go +++ b/exporter/skywalkingexporter/metricrecord_to_metricdata.go @@ -17,8 +17,9 @@ package skywalkingexporter // import "github.com/open-telemetry/opentelemetry-co import ( "strconv" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" metricpb "skywalking.apache.org/repo/goapi/collect/language/agent/v3" ) @@ -26,10 +27,10 @@ const ( defaultServiceInstance = "otel-collector-instance" ) -func resourceToMetricLabels(resource pdata.Resource) []*metricpb.Label { +func resourceToMetricLabels(resource pcommon.Resource) []*metricpb.Label { attrs := resource.Attributes() labels := make([]*metricpb.Label, 0, attrs.Len()) - attrs.Range(func(k string, v pdata.Value) bool { + attrs.Range(func(k string, v pcommon.Value) bool { labels = append(labels, &metricpb.Label{ Name: k, @@ -40,7 +41,7 @@ func resourceToMetricLabels(resource pdata.Resource) []*metricpb.Label { return labels } -func resourceToServiceInfo(resource pdata.Resource) (service string, serviceInstance string) { +func resourceToServiceInfo(resource pcommon.Resource) (service string, serviceInstance string) { attrs := resource.Attributes() if serviceName, ok := attrs.Get(conventions.AttributeServiceName); ok { service = serviceName.AsString() @@ -55,13 +56,13 @@ func resourceToServiceInfo(resource pdata.Resource) (service string, serviceInst return service, serviceInstance } -func numberMetricsToData(name string, data pdata.NumberDataPointSlice, defaultLabels []*metricpb.Label) (metrics []*metricpb.MeterData) { +func numberMetricsToData(name string, data pmetric.NumberDataPointSlice, defaultLabels []*metricpb.Label) (metrics []*metricpb.MeterData) { metrics = make([]*metricpb.MeterData, 0, data.Len()) for i := 0; i < data.Len(); i++ { dataPoint := data.At(i) attributeMap := dataPoint.Attributes() labels := make([]*metricpb.Label, 0, attributeMap.Len()+len(defaultLabels)) - attributeMap.Range(func(k string, v pdata.Value) bool { + attributeMap.Range(func(k string, v pcommon.Value) bool { labels = append(labels, &metricpb.Label{Name: k, Value: v.AsString()}) return true }) @@ -75,9 +76,9 @@ func numberMetricsToData(name string, data 
pdata.NumberDataPointSlice, defaultLa meterData.Timestamp = dataPoint.Timestamp().AsTime().UnixMilli() sv.SingleValue.Name = name switch dataPoint.ValueType() { - case pdata.MetricValueTypeInt: + case pmetric.MetricValueTypeInt: sv.SingleValue.Value = float64(dataPoint.IntVal()) - case pdata.MetricValueTypeDouble: + case pmetric.MetricValueTypeDouble: sv.SingleValue.Value = dataPoint.DoubleVal() } meterData.Metric = sv @@ -86,13 +87,13 @@ func numberMetricsToData(name string, data pdata.NumberDataPointSlice, defaultLa return metrics } -func doubleHistogramMetricsToData(name string, data pdata.HistogramDataPointSlice, defaultLabels []*metricpb.Label) (metrics []*metricpb.MeterData) { +func doubleHistogramMetricsToData(name string, data pmetric.HistogramDataPointSlice, defaultLabels []*metricpb.Label) (metrics []*metricpb.MeterData) { metrics = make([]*metricpb.MeterData, 0, 3*data.Len()) for i := 0; i < data.Len(); i++ { dataPoint := data.At(i) attributeMap := dataPoint.Attributes() labels := make([]*metricpb.Label, 0, attributeMap.Len()+len(defaultLabels)) - attributeMap.Range(func(k string, v pdata.Value) bool { + attributeMap.Range(func(k string, v pcommon.Value) bool { labels = append(labels, &metricpb.Label{Name: k, Value: v.AsString()}) return true }) @@ -140,13 +141,13 @@ func doubleHistogramMetricsToData(name string, data pdata.HistogramDataPointSlic return metrics } -func doubleSummaryMetricsToData(name string, data pdata.SummaryDataPointSlice, defaultLabels []*metricpb.Label) (metrics []*metricpb.MeterData) { +func doubleSummaryMetricsToData(name string, data pmetric.SummaryDataPointSlice, defaultLabels []*metricpb.Label) (metrics []*metricpb.MeterData) { metrics = make([]*metricpb.MeterData, 0, 3*data.Len()) for i := 0; i < data.Len(); i++ { dataPoint := data.At(i) attributeMap := dataPoint.Attributes() labels := make([]*metricpb.Label, 0, attributeMap.Len()+len(defaultLabels)) - attributeMap.Range(func(k string, v pdata.Value) bool { + attributeMap.Range(func(k string, v pcommon.Value) bool { labels = append(labels, &metricpb.Label{Name: k, Value: v.AsString()}) return true }) @@ -192,24 +193,24 @@ func doubleSummaryMetricsToData(name string, data pdata.SummaryDataPointSlice, d return metrics } -func metricDataToSwMetricData(md pdata.Metric, defaultLabels []*metricpb.Label) (metrics []*metricpb.MeterData) { +func metricDataToSwMetricData(md pmetric.Metric, defaultLabels []*metricpb.Label) (metrics []*metricpb.MeterData) { switch md.DataType() { - case pdata.MetricDataTypeNone: + case pmetric.MetricDataTypeNone: break - case pdata.MetricDataTypeGauge: + case pmetric.MetricDataTypeGauge: return numberMetricsToData(md.Name(), md.Gauge().DataPoints(), defaultLabels) - case pdata.MetricDataTypeSum: + case pmetric.MetricDataTypeSum: return numberMetricsToData(md.Name(), md.Sum().DataPoints(), defaultLabels) - case pdata.MetricDataTypeHistogram: + case pmetric.MetricDataTypeHistogram: return doubleHistogramMetricsToData(md.Name(), md.Histogram().DataPoints(), defaultLabels) - case pdata.MetricDataTypeSummary: + case pmetric.MetricDataTypeSummary: return doubleSummaryMetricsToData(md.Name(), md.Summary().DataPoints(), defaultLabels) } return nil } func metricsRecordToMetricData( - md pdata.Metrics, + md pmetric.Metrics, ) (metrics *metricpb.MeterDataCollection) { resMetrics := md.ResourceMetrics() for i := 0; i < resMetrics.Len(); i++ { diff --git a/exporter/skywalkingexporter/metricrecord_to_metricdata_test.go b/exporter/skywalkingexporter/metricrecord_to_metricdata_test.go index 
56ce04fbc3f1..f08357d5ba74 100644 --- a/exporter/skywalkingexporter/metricrecord_to_metricdata_test.go +++ b/exporter/skywalkingexporter/metricrecord_to_metricdata_test.go @@ -18,12 +18,13 @@ import ( "testing" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" metricpb "skywalking.apache.org/repo/goapi/collect/language/agent/v3" ) func TestMetricDataToLogService(t *testing.T) { - md := pdata.NewMetrics() + md := pmetric.NewMetrics() md.ResourceMetrics().AppendEmpty() // Add an empty ResourceMetrics rm := md.ResourceMetrics().AppendEmpty() @@ -43,7 +44,7 @@ func TestMetricDataToLogService(t *testing.T) { noneMetric.SetName("none") intGaugeMetric := metrics.AppendEmpty() - intGaugeMetric.SetDataType(pdata.MetricDataTypeGauge) + intGaugeMetric.SetDataType(pmetric.MetricDataTypeGauge) intGaugeMetric.SetName("int_gauge") intGauge := intGaugeMetric.Gauge() intGaugeDataPoints := intGauge.DataPoints() @@ -51,40 +52,40 @@ func TestMetricDataToLogService(t *testing.T) { intGaugeDataPoint.Attributes().InsertString("innerLabel", "innerValue") intGaugeDataPoint.Attributes().InsertString("testa", "test") intGaugeDataPoint.SetIntVal(10) - intGaugeDataPoint.SetTimestamp(pdata.Timestamp(100_000_000)) + intGaugeDataPoint.SetTimestamp(pcommon.Timestamp(100_000_000)) doubleGaugeMetric := metrics.AppendEmpty() - doubleGaugeMetric.SetDataType(pdata.MetricDataTypeGauge) + doubleGaugeMetric.SetDataType(pmetric.MetricDataTypeGauge) doubleGaugeMetric.SetName("double_gauge") doubleGauge := doubleGaugeMetric.Gauge() doubleGaugeDataPoints := doubleGauge.DataPoints() doubleGaugeDataPoint := doubleGaugeDataPoints.AppendEmpty() doubleGaugeDataPoint.Attributes().InsertString("innerLabel", "innerValue") doubleGaugeDataPoint.SetDoubleVal(10.1) - doubleGaugeDataPoint.SetTimestamp(pdata.Timestamp(100_000_000)) + doubleGaugeDataPoint.SetTimestamp(pcommon.Timestamp(100_000_000)) intSumMetric := metrics.AppendEmpty() - intSumMetric.SetDataType(pdata.MetricDataTypeSum) + intSumMetric.SetDataType(pmetric.MetricDataTypeSum) intSumMetric.SetName("int_sum") intSum := intSumMetric.Sum() intSumDataPoints := intSum.DataPoints() intSumDataPoint := intSumDataPoints.AppendEmpty() intSumDataPoint.Attributes().InsertString("innerLabel", "innerValue") intSumDataPoint.SetIntVal(11) - intSumDataPoint.SetTimestamp(pdata.Timestamp(100_000_000)) + intSumDataPoint.SetTimestamp(pcommon.Timestamp(100_000_000)) doubleSumMetric := metrics.AppendEmpty() - doubleSumMetric.SetDataType(pdata.MetricDataTypeSum) + doubleSumMetric.SetDataType(pmetric.MetricDataTypeSum) doubleSumMetric.SetName("double_sum") doubleSum := doubleSumMetric.Sum() doubleSumDataPoints := doubleSum.DataPoints() doubleSumDataPoint := doubleSumDataPoints.AppendEmpty() doubleSumDataPoint.Attributes().InsertString("innerLabel", "innerValue") doubleSumDataPoint.SetDoubleVal(10.1) - doubleSumDataPoint.SetTimestamp(pdata.Timestamp(100_000_000)) + doubleSumDataPoint.SetTimestamp(pcommon.Timestamp(100_000_000)) doubleHistogramMetric := metrics.AppendEmpty() - doubleHistogramMetric.SetDataType(pdata.MetricDataTypeHistogram) + doubleHistogramMetric.SetDataType(pmetric.MetricDataTypeHistogram) doubleHistogramMetric.SetName("double_$histogram") doubleHistogram := doubleHistogramMetric.Histogram() doubleHistogramDataPoints := doubleHistogram.DataPoints() @@ -93,19 +94,19 @@ func TestMetricDataToLogService(t *testing.T) { 
doubleHistogramDataPoint.Attributes().InsertString("innerLabelH", "innerValueH") doubleHistogramDataPoint.SetCount(5) doubleHistogramDataPoint.SetSum(10.1) - doubleHistogramDataPoint.SetTimestamp(pdata.Timestamp(100_000_000)) + doubleHistogramDataPoint.SetTimestamp(pcommon.Timestamp(100_000_000)) doubleHistogramDataPoint.SetBucketCounts([]uint64{1, 2, 2}) doubleHistogramDataPoint.SetExplicitBounds([]float64{1, 2}) doubleSummaryMetric := metrics.AppendEmpty() - doubleSummaryMetric.SetDataType(pdata.MetricDataTypeSummary) + doubleSummaryMetric.SetDataType(pmetric.MetricDataTypeSummary) doubleSummaryMetric.SetName("double-summary") doubleSummary := doubleSummaryMetric.Summary() doubleSummaryDataPoints := doubleSummary.DataPoints() doubleSummaryDataPoint := doubleSummaryDataPoints.AppendEmpty() doubleSummaryDataPoint.SetCount(2) doubleSummaryDataPoint.SetSum(10.1) - doubleSummaryDataPoint.SetTimestamp(pdata.Timestamp(100_000_000)) + doubleSummaryDataPoint.SetTimestamp(pcommon.Timestamp(100_000_000)) doubleSummaryDataPoint.Attributes().InsertString("innerLabel", "innerValue") doubleSummaryDataPoint.Attributes().InsertString("innerLabelS", "innerValueS") quantileVal := doubleSummaryDataPoint.QuantileValues().AppendEmpty() diff --git a/exporter/skywalkingexporter/skywalking.go b/exporter/skywalkingexporter/skywalking.go index 34aa9d49a0ca..86d095796906 100644 --- a/exporter/skywalkingexporter/skywalking.go +++ b/exporter/skywalkingexporter/skywalking.go @@ -20,7 +20,8 @@ import ( "fmt" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" "google.golang.org/grpc" "google.golang.org/grpc/metadata" metricpb "skywalking.apache.org/repo/goapi/collect/language/agent/v3" @@ -122,7 +123,7 @@ func newMetricsExporter(ctx context.Context, cfg *Config, settings component.Tel return oce } -func (oce *swExporter) pushLogs(_ context.Context, td pdata.Logs) error { +func (oce *swExporter) pushLogs(_ context.Context, td plog.Logs) error { // Get first available log Client. tClient, ok := <-oce.logsClients if !ok { @@ -154,7 +155,7 @@ func (oce *swExporter) pushLogs(_ context.Context, td pdata.Logs) error { return nil } -func (oce *swExporter) pushMetrics(_ context.Context, td pdata.Metrics) error { +func (oce *swExporter) pushMetrics(_ context.Context, td pmetric.Metrics) error { // Get first available metric Client. 
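// Illustrative aside, not part of the patch: the SkyWalking conversion code above now switches
// on pmetric data and value types and ranges over pcommon attribute maps. A hedged sketch of
// both switches; the function and its return shape are hypothetical, the enum and method names
// are the ones used in metricrecord_to_metricdata.go above.
package fixtures

import (
	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"
)

// dataPointValues flattens a gauge or sum metric's numeric points into float64s, roughly the
// shape the converter feeds into SkyWalking MeterData single values.
func dataPointValues(md pmetric.Metric) []float64 {
	var out []float64
	switch md.DataType() { // was a switch over pdata.MetricDataType*
	case pmetric.MetricDataTypeGauge, pmetric.MetricDataTypeSum:
		var dps pmetric.NumberDataPointSlice
		if md.DataType() == pmetric.MetricDataTypeGauge {
			dps = md.Gauge().DataPoints()
		} else {
			dps = md.Sum().DataPoints()
		}
		for i := 0; i < dps.Len(); i++ {
			dp := dps.At(i)
			dp.Attributes().Range(func(k string, v pcommon.Value) bool { // was pdata.Value
				_ = v.AsString() // label rendering elided; the converter turns these into MeterData labels
				return true
			})
			switch dp.ValueType() {
			case pmetric.MetricValueTypeInt: // was pdata.MetricValueTypeInt
				out = append(out, float64(dp.IntVal()))
			case pmetric.MetricValueTypeDouble: // was pdata.MetricValueTypeDouble
				out = append(out, dp.DoubleVal())
			}
		}
	}
	return out
}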
tClient, ok := <-oce.metricsClients if !ok { diff --git a/exporter/splunkhecexporter/client.go b/exporter/splunkhecexporter/client.go index 516abcdc79fb..636b9f5afffd 100644 --- a/exporter/splunkhecexporter/client.go +++ b/exporter/splunkhecexporter/client.go @@ -28,7 +28,9 @@ import ( jsoniter "github.com/json-iterator/go" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer/consumererror" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/multierr" "go.uber.org/zap" @@ -71,7 +73,7 @@ const minCompressionLen = 1500 func (c *client) pushMetricsData( ctx context.Context, - md pdata.Metrics, + md pmetric.Metrics, ) error { c.wg.Add(1) defer c.wg.Done() @@ -117,7 +119,7 @@ func (c *client) pushMetricsData( func (c *client) pushTraceData( ctx context.Context, - td pdata.Traces, + td ptrace.Traces, ) error { c.wg.Add(1) defer c.wg.Done() @@ -169,7 +171,7 @@ func (c *client) sendSplunkEvents(ctx context.Context, splunkEvents []*splunk.Ev return c.postEvents(ctx, body, nil, compressed) } -func (c *client) pushLogData(ctx context.Context, ld pdata.Logs) error { +func (c *client) pushLogData(ctx context.Context, ld plog.Logs) error { c.wg.Add(1) defer c.wg.Done() @@ -226,7 +228,7 @@ var profilingHeaders = map[string]string{ libraryHeaderName: profilingLibraryName, } -func isProfilingData(sl pdata.ScopeLogs) bool { +func isProfilingData(sl plog.ScopeLogs) bool { return sl.Scope().Name() == profilingLibraryName } @@ -252,7 +254,7 @@ func makeBlankBufferState(bufCap uint) bufferState { // ld log records are parsed to Splunk events. // The input data may contain both logs and profiling data. // They are batched separately and sent with different HTTP headers -func (c *client) pushLogDataInBatches(ctx context.Context, ld pdata.Logs, send func(context.Context, *bytes.Buffer, map[string]string) error) error { +func (c *client) pushLogDataInBatches(ctx context.Context, ld plog.Logs, send func(context.Context, *bytes.Buffer, map[string]string) error) error { var bufState = makeBlankBufferState(c.config.MaxContentLengthLogs) var profilingBufState = makeBlankBufferState(c.config.MaxContentLengthLogs) var permanentErrors []error @@ -314,7 +316,7 @@ func (c *client) pushLogDataInBatches(ctx context.Context, ld pdata.Logs, send f return multierr.Combine(permanentErrors...) 
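// Illustrative aside, not part of the patch: the Splunk HEC push paths above are now typed per
// signal (plog.Logs, pmetric.Metrics, ptrace.Traces), and the trace fixtures for them are built
// from ptrace with IDs from pcommon, as in client_test.go further down. A minimal sketch of
// that construction; the helper name is hypothetical.
package fixtures

import (
	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/ptrace"
)

func buildTraceFixture() ptrace.Traces {
	traces := ptrace.NewTraces() // was pdata.NewTraces()
	ils := traces.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty()
	span := ils.Spans().AppendEmpty()
	span.SetName("root")
	span.SetStartTimestamp(pcommon.Timestamp(1e9)) // was pdata.Timestamp
	span.SetEndTimestamp(pcommon.Timestamp(2e9))
	span.SetTraceID(pcommon.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) // was pdata.NewTraceID
	span.SetSpanID(pcommon.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 1}))                            // was pdata.NewSpanID
	span.Status().SetCode(ptrace.StatusCodeOk) // was pdata.StatusCodeOk
	return traces
}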
} -func (c *client) pushLogRecords(ctx context.Context, lds pdata.ResourceLogsSlice, state *bufferState, headers map[string]string, send func(context.Context, *bytes.Buffer, map[string]string) error) (permanentErrors []error, sendingError error) { +func (c *client) pushLogRecords(ctx context.Context, lds plog.ResourceLogsSlice, state *bufferState, headers map[string]string, send func(context.Context, *bytes.Buffer, map[string]string) error) (permanentErrors []error, sendingError error) { res := lds.At(state.resource) logs := res.ScopeLogs().At(state.library).LogRecords() bufCap := int(c.config.MaxContentLengthLogs) @@ -379,7 +381,7 @@ func (c *client) pushLogRecords(ctx context.Context, lds pdata.ResourceLogsSlice return permanentErrors, nil } -func (c *client) pushMetricsRecords(ctx context.Context, mds pdata.ResourceMetricsSlice, state *bufferState, send func(context.Context, *bytes.Buffer) error) (permanentErrors []error, sendingError error) { +func (c *client) pushMetricsRecords(ctx context.Context, mds pmetric.ResourceMetricsSlice, state *bufferState, send func(context.Context, *bytes.Buffer) error) (permanentErrors []error, sendingError error) { res := mds.At(state.resource) metrics := res.ScopeMetrics().At(state.library).Metrics() bufCap := int(c.config.MaxContentLengthMetrics) @@ -446,7 +448,7 @@ func (c *client) pushMetricsRecords(ctx context.Context, mds pdata.ResourceMetri return permanentErrors, nil } -func (c *client) pushTracesData(ctx context.Context, tds pdata.ResourceSpansSlice, state *bufferState, send func(context.Context, *bytes.Buffer) error) (permanentErrors []error, sendingError error) { +func (c *client) pushTracesData(ctx context.Context, tds ptrace.ResourceSpansSlice, state *bufferState, send func(context.Context, *bytes.Buffer) error) (permanentErrors []error, sendingError error) { res := tds.At(state.resource) spans := res.ScopeSpans().At(state.library).Spans() bufCap := int(c.config.MaxContentLengthTraces) @@ -514,7 +516,7 @@ func (c *client) pushTracesData(ctx context.Context, tds pdata.ResourceSpansSlic // pushMetricsDataInBatches sends batches of Splunk events in JSON format. // The batch content length is restricted to MaxContentLengthMetrics. // md metrics are parsed to Splunk events. -func (c *client) pushMetricsDataInBatches(ctx context.Context, md pdata.Metrics, send func(context.Context, *bytes.Buffer) error) error { +func (c *client) pushMetricsDataInBatches(ctx context.Context, md pmetric.Metrics, send func(context.Context, *bytes.Buffer) error) error { var bufState = makeBlankBufferState(c.config.MaxContentLengthMetrics) var permanentErrors []error @@ -549,7 +551,7 @@ func (c *client) pushMetricsDataInBatches(ctx context.Context, md pdata.Metrics, // pushTracesDataInBatches sends batches of Splunk events in JSON format. // The batch content length is restricted to MaxContentLengthMetrics. // td traces are parsed to Splunk events. -func (c *client) pushTracesDataInBatches(ctx context.Context, td pdata.Traces, send func(context.Context, *bytes.Buffer) error) error { +func (c *client) pushTracesDataInBatches(ctx context.Context, td ptrace.Traces, send func(context.Context, *bytes.Buffer) error) error { var bufState = makeBlankBufferState(c.config.MaxContentLengthTraces) var permanentErrors []error @@ -616,12 +618,12 @@ func (c *client) postEvents(ctx context.Context, events io.Reader, headers map[s // subLogs returns a subset of `ld` starting from `profilingBufFront` for profiling data // plus starting from `bufFront` for non-profiling data. 
Both can be nil, in which case they are ignored -func (c *client) subLogs(ld *pdata.Logs, bufFront *index, profilingBufFront *index) *pdata.Logs { +func (c *client) subLogs(ld *plog.Logs, bufFront *index, profilingBufFront *index) *plog.Logs { if ld == nil { return ld } - subset := pdata.NewLogs() + subset := plog.NewLogs() if c.config.LogDataEnabled { subLogsByType(ld, bufFront, &subset, false) } @@ -633,30 +635,30 @@ func (c *client) subLogs(ld *pdata.Logs, bufFront *index, profilingBufFront *ind } // subMetrics returns a subset of `md`starting from `bufFront`. It can be nil, in which case it is ignored -func subMetrics(md *pdata.Metrics, bufFront *index) *pdata.Metrics { +func subMetrics(md *pmetric.Metrics, bufFront *index) *pmetric.Metrics { if md == nil { return md } - subset := pdata.NewMetrics() + subset := pmetric.NewMetrics() subMetricsByType(md, bufFront, &subset) return &subset } // subTraces returns a subset of `td`starting from `bufFront`. It can be nil, in which case it is ignored -func subTraces(td *pdata.Traces, bufFront *index) *pdata.Traces { +func subTraces(td *ptrace.Traces, bufFront *index) *ptrace.Traces { if td == nil { return td } - subset := pdata.NewTraces() + subset := ptrace.NewTraces() subTracesByType(td, bufFront, &subset) return &subset } -func subLogsByType(src *pdata.Logs, from *index, dst *pdata.Logs, profiling bool) { +func subLogsByType(src *plog.Logs, from *index, dst *plog.Logs, profiling bool) { if from == nil { return // All the data of this type was sent successfully } @@ -703,7 +705,7 @@ func subLogsByType(src *pdata.Logs, from *index, dst *pdata.Logs, profiling bool } } -func subMetricsByType(src *pdata.Metrics, from *index, dst *pdata.Metrics) { +func subMetricsByType(src *pmetric.Metrics, from *index, dst *pmetric.Metrics) { if from == nil { return // All the data of this type was sent successfully } @@ -745,7 +747,7 @@ func subMetricsByType(src *pdata.Metrics, from *index, dst *pdata.Metrics) { } } -func subTracesByType(src *pdata.Traces, from *index, dst *pdata.Traces) { +func subTracesByType(src *ptrace.Traces, from *index, dst *ptrace.Traces) { if from == nil { return // All the data of this type was sent successfully } diff --git a/exporter/splunkhecexporter/client_test.go b/exporter/splunkhecexporter/client_test.go index 972ca264dba1..a7cc5aaba986 100644 --- a/exporter/splunkhecexporter/client_test.go +++ b/exporter/splunkhecexporter/client_test.go @@ -32,8 +32,11 @@ import ( "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/consumer/consumererror" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap" "go.uber.org/zap/zaptest" @@ -71,10 +74,10 @@ func newTestClientWithPresetResponses(codes []int, bodies []string) (*http.Clien }, &headers } -func createMetricsData(numberOfDataPoints int) pdata.Metrics { +func createMetricsData(numberOfDataPoints int) pmetric.Metrics { doubleVal := 1234.5678 - metrics := pdata.NewMetrics() + metrics := pmetric.NewMetrics() rm := metrics.ResourceMetrics().AppendEmpty() rm.Resource().Attributes().InsertString("k0", "v0") rm.Resource().Attributes().InsertString("k1", "v1") @@ -85,9 +88,9 @@ func createMetricsData(numberOfDataPoints int) pdata.Metrics { ilm := 
rm.ScopeMetrics().AppendEmpty() metric := ilm.Metrics().AppendEmpty() metric.SetName("gauge_double_with_dims") - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) doublePt := metric.Gauge().DataPoints().AppendEmpty() - doublePt.SetTimestamp(pdata.NewTimestampFromTime(tsUnix)) + doublePt.SetTimestamp(pcommon.NewTimestampFromTime(tsUnix)) doublePt.SetDoubleVal(doubleVal) doublePt.Attributes().InsertString("k/n0", "vn0") doublePt.Attributes().InsertString("k/n1", "vn1") @@ -98,8 +101,8 @@ func createMetricsData(numberOfDataPoints int) pdata.Metrics { return metrics } -func createTraceData(numberOfTraces int) pdata.Traces { - traces := pdata.NewTraces() +func createTraceData(numberOfTraces int) ptrace.Traces { + traces := ptrace.NewTraces() rs := traces.ResourceSpans().AppendEmpty() rs.Resource().Attributes().InsertString("resource", "R1") ils := rs.ScopeSpans().AppendEmpty() @@ -107,14 +110,14 @@ func createTraceData(numberOfTraces int) pdata.Traces { for i := 0; i < numberOfTraces; i++ { span := ils.Spans().AppendEmpty() span.SetName("root") - span.SetStartTimestamp(pdata.Timestamp((i + 1) * 1e9)) - span.SetEndTimestamp(pdata.Timestamp((i + 2) * 1e9)) - span.SetTraceID(pdata.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) - span.SetSpanID(pdata.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 1})) + span.SetStartTimestamp(pcommon.Timestamp((i + 1) * 1e9)) + span.SetEndTimestamp(pcommon.Timestamp((i + 2) * 1e9)) + span.SetTraceID(pcommon.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) + span.SetSpanID(pcommon.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 1})) span.SetTraceState("foo") if i%2 == 0 { - span.SetParentSpanID(pdata.NewSpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8})) - span.Status().SetCode(pdata.StatusCodeOk) + span.SetParentSpanID(pcommon.NewSpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8})) + span.Status().SetCode(ptrace.StatusCodeOk) span.Status().SetMessage("ok") } } @@ -122,7 +125,7 @@ func createTraceData(numberOfTraces int) pdata.Traces { return traces } -func createLogData(numResources int, numLibraries int, numRecords int) pdata.Logs { +func createLogData(numResources int, numLibraries int, numRecords int) plog.Logs { return createLogDataWithCustomLibraries(numResources, make([]string, numLibraries), repeat(numRecords, numLibraries)) } @@ -134,8 +137,8 @@ func repeat(what int, times int) []int { return result } -func createLogDataWithCustomLibraries(numResources int, libraries []string, numRecords []int) pdata.Logs { - logs := pdata.NewLogs() +func createLogDataWithCustomLibraries(numResources int, libraries []string, numRecords []int) plog.Logs { + logs := plog.NewLogs() logs.ResourceLogs().EnsureCapacity(numResources) for i := 0; i < numResources; i++ { rl := logs.ResourceLogs().AppendEmpty() @@ -145,7 +148,7 @@ func createLogDataWithCustomLibraries(numResources int, libraries []string, numR sl.Scope().SetName(libraries[j]) sl.LogRecords().EnsureCapacity(numRecords[j]) for k := 0; k < numRecords[j]; k++ { - ts := pdata.Timestamp(int64(k) * time.Millisecond.Nanoseconds()) + ts := pcommon.Timestamp(int64(k) * time.Millisecond.Nanoseconds()) logRecord := sl.LogRecords().AppendEmpty() logRecord.Body().SetStringVal("mylog") logRecord.Attributes().InsertString(splunk.DefaultNameLabel, fmt.Sprintf("%d_%d_%d", i, j, k)) @@ -192,7 +195,7 @@ func (c *CapturingData) ServeHTTP(w http.ResponseWriter, r *http.Request) { w.WriteHeader(c.statusCode) } -func runMetricsExport(cfg *Config, metrics pdata.Metrics, t *testing.T) 
([]receivedRequest, error) { +func runMetricsExport(cfg *Config, metrics pmetric.Metrics, t *testing.T) ([]receivedRequest, error) { listener, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { panic(err) @@ -233,7 +236,7 @@ func runMetricsExport(cfg *Config, metrics pdata.Metrics, t *testing.T) ([]recei } } -func runTraceExport(testConfig *Config, traces pdata.Traces, t *testing.T) ([]receivedRequest, error) { +func runTraceExport(testConfig *Config, traces ptrace.Traces, t *testing.T) ([]receivedRequest, error) { listener, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { panic(err) @@ -277,7 +280,7 @@ func runTraceExport(testConfig *Config, traces pdata.Traces, t *testing.T) ([]re } } -func runLogExport(cfg *Config, ld pdata.Logs, t *testing.T) ([]receivedRequest, error) { +func runLogExport(cfg *Config, ld plog.Logs, t *testing.T) ([]receivedRequest, error) { listener, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { panic(err) @@ -332,7 +335,7 @@ func TestReceiveTracesBatches(t *testing.T) { tests := []struct { name string conf *Config - traces pdata.Traces + traces ptrace.Traces want wantType }{ { @@ -455,7 +458,7 @@ func TestReceiveLogs(t *testing.T) { tests := []struct { name string conf *Config - logs pdata.Logs + logs plog.Logs want wantType }{ { @@ -591,7 +594,7 @@ func TestReceiveBatchedMetrics(t *testing.T) { tests := []struct { name string conf *Config - metrics pdata.Metrics + metrics pmetric.Metrics want wantType }{ { @@ -755,7 +758,7 @@ func TestInvalidLogs(t *testing.T) { func TestInvalidMetrics(t *testing.T) { cfg := NewFactory().CreateDefaultConfig().(*Config) - _, err := runMetricsExport(cfg, pdata.NewMetrics(), t) + _, err := runMetricsExport(cfg, pmetric.NewMetrics(), t) assert.Error(t, err) } @@ -844,15 +847,15 @@ func TestInvalidURLClient(t *testing.T) { func Test_pushLogData_nil_Logs(t *testing.T) { tests := []struct { name func(bool) string - logs pdata.Logs - requires func(*testing.T, pdata.Logs) + logs plog.Logs + requires func(*testing.T, plog.Logs) }{ { name: func(disable bool) string { return "COMPRESSION " + map[bool]string{true: "DISABLED ", false: "ENABLED "}[disable] + "nil ResourceLogs" }, - logs: pdata.NewLogs(), - requires: func(t *testing.T, logs pdata.Logs) { + logs: plog.NewLogs(), + requires: func(t *testing.T, logs plog.Logs) { require.Zero(t, logs.ResourceLogs().Len()) }, }, @@ -860,12 +863,12 @@ func Test_pushLogData_nil_Logs(t *testing.T) { name: func(disable bool) string { return "COMPRESSION " + map[bool]string{true: "DISABLED ", false: "ENABLED "}[disable] + "nil InstrumentationLogs" }, - logs: func() pdata.Logs { - logs := pdata.NewLogs() + logs: func() plog.Logs { + logs := plog.NewLogs() logs.ResourceLogs().AppendEmpty() return logs }(), - requires: func(t *testing.T, logs pdata.Logs) { + requires: func(t *testing.T, logs plog.Logs) { require.Equal(t, logs.ResourceLogs().Len(), 1) require.Zero(t, logs.ResourceLogs().At(0).ScopeLogs().Len()) }, @@ -874,12 +877,12 @@ func Test_pushLogData_nil_Logs(t *testing.T) { name: func(disable bool) string { return "COMPRESSION " + map[bool]string{true: "DISABLED ", false: "ENABLED "}[disable] + "nil LogRecords" }, - logs: func() pdata.Logs { - logs := pdata.NewLogs() + logs: func() plog.Logs { + logs := plog.NewLogs() logs.ResourceLogs().AppendEmpty().ScopeLogs().AppendEmpty() return logs }(), - requires: func(t *testing.T, logs pdata.Logs) { + requires: func(t *testing.T, logs plog.Logs) { require.Equal(t, logs.ResourceLogs().Len(), 1) require.Equal(t, 
logs.ResourceLogs().At(0).ScopeLogs().Len(), 1) require.Zero(t, logs.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().Len()) @@ -916,7 +919,7 @@ func Test_pushLogData_InvalidLog(t *testing.T) { logger: zaptest.NewLogger(t), } - logs := pdata.NewLogs() + logs := plog.NewLogs() log := logs.ResourceLogs().AppendEmpty().ScopeLogs().AppendEmpty().LogRecords().AppendEmpty() // Invalid log value log.Body().SetDoubleVal(math.Inf(1)) diff --git a/exporter/splunkhecexporter/exporter.go b/exporter/splunkhecexporter/exporter.go index 4d928584e034..a33ab591bce8 100644 --- a/exporter/splunkhecexporter/exporter.go +++ b/exporter/splunkhecexporter/exporter.go @@ -26,7 +26,9 @@ import ( "time" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk" @@ -41,9 +43,9 @@ const ( ) type splunkExporter struct { - pushMetricsData func(ctx context.Context, md pdata.Metrics) error - pushTraceData func(ctx context.Context, td pdata.Traces) error - pushLogData func(ctx context.Context, td pdata.Logs) error + pushMetricsData func(ctx context.Context, md pmetric.Metrics) error + pushTraceData func(ctx context.Context, td ptrace.Traces) error + pushLogData func(ctx context.Context, td plog.Logs) error stop func(ctx context.Context) (err error) start func(ctx context.Context, host component.Host) (err error) } diff --git a/exporter/splunkhecexporter/exporter_test.go b/exporter/splunkhecexporter/exporter_test.go index a01521c4b8e5..280bfb855ad6 100644 --- a/exporter/splunkhecexporter/exporter_test.go +++ b/exporter/splunkhecexporter/exporter_test.go @@ -36,8 +36,9 @@ import ( "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/config/configtls" "go.opentelemetry.io/collector/exporter/exporterhelper" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" "go.uber.org/zap" "google.golang.org/protobuf/types/known/timestamppb" @@ -216,12 +217,12 @@ func generateLargeBatch() *agentmetricspb.ExportMetricsServiceRequest { return md } -func generateLargeLogsBatch() pdata.Logs { - logs := pdata.NewLogs() +func generateLargeLogsBatch() plog.Logs { + logs := plog.NewLogs() rl := logs.ResourceLogs().AppendEmpty() sl := rl.ScopeLogs().AppendEmpty() sl.LogRecords().EnsureCapacity(65000) - ts := pdata.Timestamp(123) + ts := pcommon.Timestamp(123) for i := 0; i < 65000; i++ { logRecord := sl.LogRecords().AppendEmpty() logRecord.Body().SetStringVal("mylog") @@ -237,7 +238,7 @@ func generateLargeLogsBatch() pdata.Logs { } func TestConsumeLogsData(t *testing.T) { - smallBatch := pdata.NewLogs() + smallBatch := plog.NewLogs() logRecord := smallBatch.ResourceLogs().AppendEmpty().ScopeLogs().AppendEmpty().LogRecords().AppendEmpty() logRecord.Body().SetStringVal("mylog") logRecord.Attributes().InsertString(conventions.AttributeHostName, "myhost") @@ -245,7 +246,7 @@ func TestConsumeLogsData(t *testing.T) { logRecord.SetTimestamp(123) tests := []struct { name string - ld pdata.Logs + ld plog.Logs reqTestFunc func(t *testing.T, r *http.Request) httpResponseCode int wantErr bool diff --git a/exporter/splunkhecexporter/go.mod b/exporter/splunkhecexporter/go.mod index 6dd154802e06..943d6a73390d 
100644 --- a/exporter/splunkhecexporter/go.mod +++ b/exporter/splunkhecexporter/go.mod @@ -10,8 +10,9 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperresourceattr v0.48.0 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus v0.48.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/multierr v1.8.0 go.uber.org/zap v1.21.0 google.golang.org/protobuf v1.28.0 @@ -19,27 +20,26 @@ require ( require ( github.com/benbjohnson/clock v1.3.0 // indirect - github.com/cenkalti/backoff/v4 v4.1.2 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.2 // indirect github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect - golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect + golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 // indirect golang.org/x/text v0.3.7 // indirect google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect google.golang.org/grpc v1.45.0 // indirect @@ -54,3 +54,5 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperresourceattr => ../../pkg/batchperresourceattr replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal => ../../internal/coreinternal + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/exporter/splunkhecexporter/go.sum b/exporter/splunkhecexporter/go.sum index ff9d54cea9ad..fa1b81d57e2d 100644 --- a/exporter/splunkhecexporter/go.sum +++ b/exporter/splunkhecexporter/go.sum @@ -19,8 +19,8 @@ github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZx github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= 
-github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= -github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.3.0 h1:t/LhUZLVitR1Ow2YOnduCsavhwFUklBMoGVYUCqmCqk= github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -94,7 +94,6 @@ github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -128,8 +127,8 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -177,8 +176,6 @@ github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -194,17 +191,19 @@ github.com/yuin/goldmark v1.2.1/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= @@ -241,8 +240,8 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync 
v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -266,8 +265,8 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= diff --git a/exporter/splunkhecexporter/logdata_to_splunk.go b/exporter/splunkhecexporter/logdata_to_splunk.go index 81777fe89a5a..63dfedeac6aa 100644 --- a/exporter/splunkhecexporter/logdata_to_splunk.go +++ b/exporter/splunkhecexporter/logdata_to_splunk.go @@ -17,7 +17,8 @@ package splunkhecexporter // import "github.com/open-telemetry/opentelemetry-col import ( "time" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk" @@ -31,7 +32,7 @@ const ( traceIDFieldKey = "trace_id" ) -func mapLogRecordToSplunkEvent(res pdata.Resource, lr pdata.LogRecord, config *Config, logger *zap.Logger) *splunk.Event { +func mapLogRecordToSplunkEvent(res pcommon.Resource, lr plog.LogRecord, config *Config, logger *zap.Logger) *splunk.Event { host := unknownHostName source := config.Source sourcetype := config.SourceType @@ -52,11 +53,11 @@ func mapLogRecordToSplunkEvent(res pdata.Resource, lr pdata.LogRecord, config *C if lr.SeverityText() != "" { fields[severityTextKey] = lr.SeverityText() } - if lr.SeverityNumber() != pdata.SeverityNumberUNDEFINED { + if lr.SeverityNumber() != plog.SeverityNumberUNDEFINED { fields[severityNumberKey] = lr.SeverityNumber() } - res.Attributes().Range(func(k string, v pdata.Value) bool { + res.Attributes().Range(func(k string, v pcommon.Value) bool { switch k { case hostKey: host = v.StringVal() @@ -73,7 +74,7 @@ func mapLogRecordToSplunkEvent(res pdata.Resource, lr pdata.LogRecord, config *C } return true }) - lr.Attributes().Range(func(k string, v pdata.Value) bool { + lr.Attributes().Range(func(k string, v pcommon.Value) bool { switch k { case hostKey: host = v.StringVal() @@ -103,31 +104,31 @@ func mapLogRecordToSplunkEvent(res pdata.Resource, lr pdata.LogRecord, config *C } } -func convertAttributeValue(value pdata.Value, logger *zap.Logger) interface{} { +func convertAttributeValue(value pcommon.Value, logger *zap.Logger) interface{} { switch value.Type() { - case pdata.ValueTypeInt: + case pcommon.ValueTypeInt: return value.IntVal() - case pdata.ValueTypeBool: + case pcommon.ValueTypeBool: return value.BoolVal() - case pdata.ValueTypeDouble: + case pcommon.ValueTypeDouble: 
return value.DoubleVal() - case pdata.ValueTypeString: + case pcommon.ValueTypeString: return value.StringVal() - case pdata.ValueTypeMap: + case pcommon.ValueTypeMap: values := map[string]interface{}{} - value.MapVal().Range(func(k string, v pdata.Value) bool { + value.MapVal().Range(func(k string, v pcommon.Value) bool { values[k] = convertAttributeValue(v, logger) return true }) return values - case pdata.ValueTypeSlice: + case pcommon.ValueTypeSlice: arrayVal := value.SliceVal() values := make([]interface{}, arrayVal.Len()) for i := 0; i < arrayVal.Len(); i++ { values[i] = convertAttributeValue(arrayVal.At(i), logger) } return values - case pdata.ValueTypeEmpty: + case pcommon.ValueTypeEmpty: return nil default: logger.Debug("Unhandled value type", zap.String("type", value.Type().String())) @@ -136,7 +137,7 @@ func convertAttributeValue(value pdata.Value, logger *zap.Logger) interface{} { } // nanoTimestampToEpochMilliseconds transforms nanoseconds into .. For example, 1433188255.500 indicates 1433188255 seconds and 500 milliseconds after epoch. -func nanoTimestampToEpochMilliseconds(ts pdata.Timestamp) *float64 { +func nanoTimestampToEpochMilliseconds(ts pcommon.Timestamp) *float64 { duration := time.Duration(ts) if duration == 0 { // some telemetry sources send data with timestamps set to 0 by design, as their original target destinations diff --git a/exporter/splunkhecexporter/logdata_to_splunk_test.go b/exporter/splunkhecexporter/logdata_to_splunk_test.go index 7143d9e40113..3ab95912c6cd 100644 --- a/exporter/splunkhecexporter/logdata_to_splunk_test.go +++ b/exporter/splunkhecexporter/logdata_to_splunk_test.go @@ -18,8 +18,9 @@ import ( "testing" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk" @@ -27,19 +28,19 @@ import ( func Test_mapLogRecordToSplunkEvent(t *testing.T) { logger := zap.NewNop() - ts := pdata.Timestamp(123) + ts := pcommon.Timestamp(123) tests := []struct { name string - logRecordFn func() pdata.LogRecord - logResourceFn func() pdata.Resource + logRecordFn func() plog.LogRecord + logResourceFn func() pcommon.Resource configDataFn func() *Config wantSplunkEvents []*splunk.Event }{ { name: "valid", - logRecordFn: func() pdata.LogRecord { - logRecord := pdata.NewLogRecord() + logRecordFn: func() plog.LogRecord { + logRecord := plog.NewLogRecord() logRecord.Body().SetStringVal("mylog") logRecord.Attributes().InsertString(splunk.DefaultSourceLabel, "myapp") logRecord.Attributes().InsertString(splunk.DefaultSourceTypeLabel, "myapp-type") @@ -48,7 +49,7 @@ func Test_mapLogRecordToSplunkEvent(t *testing.T) { logRecord.SetTimestamp(ts) return logRecord }, - logResourceFn: pdata.NewResource, + logResourceFn: pcommon.NewResource, configDataFn: func() *Config { config := createDefaultConfig().(*Config) config.Source = "source" @@ -62,8 +63,8 @@ func Test_mapLogRecordToSplunkEvent(t *testing.T) { }, { name: "with_name", - logRecordFn: func() pdata.LogRecord { - logRecord := pdata.NewLogRecord() + logRecordFn: func() plog.LogRecord { + logRecord := plog.NewLogRecord() logRecord.Body().SetStringVal("mylog") logRecord.Attributes().InsertString(splunk.DefaultSourceLabel, "myapp") logRecord.Attributes().InsertString(splunk.DefaultSourceTypeLabel, "myapp-type") @@ -72,7 +73,7 @@ func 
Test_mapLogRecordToSplunkEvent(t *testing.T) { logRecord.SetTimestamp(ts) return logRecord }, - logResourceFn: pdata.NewResource, + logResourceFn: pcommon.NewResource, configDataFn: func() *Config { config := createDefaultConfig().(*Config) config.Source = "source" @@ -86,14 +87,14 @@ func Test_mapLogRecordToSplunkEvent(t *testing.T) { }, { name: "with_hec_token", - logRecordFn: func() pdata.LogRecord { - logRecord := pdata.NewLogRecord() + logRecordFn: func() plog.LogRecord { + logRecord := plog.NewLogRecord() logRecord.Body().SetStringVal("mylog") logRecord.Attributes().InsertString(splunk.HecTokenLabel, "mytoken") logRecord.SetTimestamp(ts) return logRecord }, - logResourceFn: pdata.NewResource, + logResourceFn: pcommon.NewResource, configDataFn: func() *Config { config := createDefaultConfig().(*Config) config.Source = "source" @@ -107,8 +108,8 @@ func Test_mapLogRecordToSplunkEvent(t *testing.T) { }, { name: "non-string attribute", - logRecordFn: func() pdata.LogRecord { - logRecord := pdata.NewLogRecord() + logRecordFn: func() plog.LogRecord { + logRecord := plog.NewLogRecord() logRecord.Body().SetStringVal("mylog") logRecord.Attributes().InsertString(splunk.DefaultSourceLabel, "myapp") logRecord.Attributes().InsertString(splunk.DefaultSourceTypeLabel, "myapp-type") @@ -117,7 +118,7 @@ func Test_mapLogRecordToSplunkEvent(t *testing.T) { logRecord.SetTimestamp(ts) return logRecord }, - logResourceFn: pdata.NewResource, + logResourceFn: pcommon.NewResource, configDataFn: func() *Config { config := createDefaultConfig().(*Config) config.Source = "source" @@ -130,14 +131,14 @@ func Test_mapLogRecordToSplunkEvent(t *testing.T) { }, { name: "with_config", - logRecordFn: func() pdata.LogRecord { - logRecord := pdata.NewLogRecord() + logRecordFn: func() plog.LogRecord { + logRecord := plog.NewLogRecord() logRecord.Body().SetStringVal("mylog") logRecord.Attributes().InsertString("custom", "custom") logRecord.SetTimestamp(ts) return logRecord }, - logResourceFn: pdata.NewResource, + logResourceFn: pcommon.NewResource, configDataFn: func() *Config { config := createDefaultConfig().(*Config) config.Source = "source" @@ -150,8 +151,8 @@ func Test_mapLogRecordToSplunkEvent(t *testing.T) { }, { name: "with_custom_mapping", - logRecordFn: func() pdata.LogRecord { - logRecord := pdata.NewLogRecord() + logRecordFn: func() plog.LogRecord { + logRecord := plog.NewLogRecord() logRecord.Body().SetStringVal("mylog") logRecord.Attributes().InsertString("custom", "custom") logRecord.Attributes().InsertString("mysource", "mysource") @@ -159,11 +160,11 @@ func Test_mapLogRecordToSplunkEvent(t *testing.T) { logRecord.Attributes().InsertString("myindex", "myindex") logRecord.Attributes().InsertString("myhost", "myhost") logRecord.SetSeverityText("DEBUG") - logRecord.SetSeverityNumber(pdata.SeverityNumberDEBUG) + logRecord.SetSeverityNumber(plog.SeverityNumberDEBUG) logRecord.SetTimestamp(ts) return logRecord }, - logResourceFn: pdata.NewResource, + logResourceFn: pcommon.NewResource, configDataFn: func() *Config { return &Config{ HecToOtelAttrs: splunk.HecToOtelAttrs{ @@ -180,7 +181,7 @@ func Test_mapLogRecordToSplunkEvent(t *testing.T) { }, wantSplunkEvents: []*splunk.Event{ func() *splunk.Event { - event := commonLogSplunkEvent("mylog", ts, map[string]interface{}{"custom": "custom", "myseverity": "DEBUG", "myseveritynum": pdata.SeverityNumber(5)}, "myhost", "mysource", "mysourcetype") + event := commonLogSplunkEvent("mylog", ts, map[string]interface{}{"custom": "custom", "myseverity": "DEBUG", "myseveritynum": 
plog.SeverityNumber(5)}, "myhost", "mysource", "mysourcetype") event.Index = "myindex" return event }(), @@ -188,11 +189,11 @@ func Test_mapLogRecordToSplunkEvent(t *testing.T) { }, { name: "log_is_empty", - logRecordFn: func() pdata.LogRecord { - logRecord := pdata.NewLogRecord() + logRecordFn: func() plog.LogRecord { + logRecord := plog.NewLogRecord() return logRecord }, - logResourceFn: pdata.NewResource, + logResourceFn: pcommon.NewResource, configDataFn: func() *Config { config := createDefaultConfig().(*Config) config.Source = "source" @@ -205,13 +206,13 @@ func Test_mapLogRecordToSplunkEvent(t *testing.T) { }, { name: "with span and trace id", - logRecordFn: func() pdata.LogRecord { - logRecord := pdata.NewLogRecord() - logRecord.SetSpanID(pdata.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 50})) - logRecord.SetTraceID(pdata.NewTraceID([16]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 100})) + logRecordFn: func() plog.LogRecord { + logRecord := plog.NewLogRecord() + logRecord.SetSpanID(pcommon.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 50})) + logRecord.SetTraceID(pcommon.NewTraceID([16]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 100})) return logRecord }, - logResourceFn: pdata.NewResource, + logResourceFn: pcommon.NewResource, configDataFn: func() *Config { config := createDefaultConfig().(*Config) config.Source = "source" @@ -227,8 +228,8 @@ func Test_mapLogRecordToSplunkEvent(t *testing.T) { }, { name: "with double body", - logRecordFn: func() pdata.LogRecord { - logRecord := pdata.NewLogRecord() + logRecordFn: func() plog.LogRecord { + logRecord := plog.NewLogRecord() logRecord.Body().SetDoubleVal(42) logRecord.Attributes().InsertString(splunk.DefaultSourceLabel, "myapp") logRecord.Attributes().InsertString(splunk.DefaultSourceTypeLabel, "myapp-type") @@ -237,7 +238,7 @@ func Test_mapLogRecordToSplunkEvent(t *testing.T) { logRecord.SetTimestamp(ts) return logRecord }, - logResourceFn: pdata.NewResource, + logResourceFn: pcommon.NewResource, configDataFn: func() *Config { config := createDefaultConfig().(*Config) config.Source = "source" @@ -250,8 +251,8 @@ func Test_mapLogRecordToSplunkEvent(t *testing.T) { }, { name: "with int body", - logRecordFn: func() pdata.LogRecord { - logRecord := pdata.NewLogRecord() + logRecordFn: func() plog.LogRecord { + logRecord := plog.NewLogRecord() logRecord.Body().SetIntVal(42) logRecord.Attributes().InsertString(splunk.DefaultSourceLabel, "myapp") logRecord.Attributes().InsertString(splunk.DefaultSourceTypeLabel, "myapp-type") @@ -260,7 +261,7 @@ func Test_mapLogRecordToSplunkEvent(t *testing.T) { logRecord.SetTimestamp(ts) return logRecord }, - logResourceFn: pdata.NewResource, + logResourceFn: pcommon.NewResource, configDataFn: func() *Config { config := createDefaultConfig().(*Config) config.Source = "source" @@ -273,8 +274,8 @@ func Test_mapLogRecordToSplunkEvent(t *testing.T) { }, { name: "with bool body", - logRecordFn: func() pdata.LogRecord { - logRecord := pdata.NewLogRecord() + logRecordFn: func() plog.LogRecord { + logRecord := plog.NewLogRecord() logRecord.Body().SetBoolVal(true) logRecord.Attributes().InsertString(splunk.DefaultSourceLabel, "myapp") logRecord.Attributes().InsertString(splunk.DefaultSourceTypeLabel, "myapp-type") @@ -283,7 +284,7 @@ func Test_mapLogRecordToSplunkEvent(t *testing.T) { logRecord.SetTimestamp(ts) return logRecord }, - logResourceFn: pdata.NewResource, + logResourceFn: pcommon.NewResource, configDataFn: func() *Config { config := createDefaultConfig().(*Config) config.Source = "source" @@ -296,9 +297,9 
@@ func Test_mapLogRecordToSplunkEvent(t *testing.T) { }, { name: "with map body", - logRecordFn: func() pdata.LogRecord { - logRecord := pdata.NewLogRecord() - attVal := pdata.NewValueMap() + logRecordFn: func() plog.LogRecord { + logRecord := plog.NewLogRecord() + attVal := pcommon.NewValueMap() attMap := attVal.MapVal() attMap.InsertDouble("23", 45) attMap.InsertString("foo", "bar") @@ -310,7 +311,7 @@ func Test_mapLogRecordToSplunkEvent(t *testing.T) { logRecord.SetTimestamp(ts) return logRecord }, - logResourceFn: pdata.NewResource, + logResourceFn: pcommon.NewResource, configDataFn: func() *Config { config := createDefaultConfig().(*Config) config.Source = "source" @@ -325,8 +326,8 @@ func Test_mapLogRecordToSplunkEvent(t *testing.T) { }, { name: "with nil body", - logRecordFn: func() pdata.LogRecord { - logRecord := pdata.NewLogRecord() + logRecordFn: func() plog.LogRecord { + logRecord := plog.NewLogRecord() logRecord.Attributes().InsertString(splunk.DefaultSourceLabel, "myapp") logRecord.Attributes().InsertString(splunk.DefaultSourceTypeLabel, "myapp-type") logRecord.Attributes().InsertString(conventions.AttributeHostName, "myhost") @@ -334,7 +335,7 @@ func Test_mapLogRecordToSplunkEvent(t *testing.T) { logRecord.SetTimestamp(ts) return logRecord }, - logResourceFn: pdata.NewResource, + logResourceFn: pcommon.NewResource, configDataFn: func() *Config { config := createDefaultConfig().(*Config) config.Source = "source" @@ -348,9 +349,9 @@ func Test_mapLogRecordToSplunkEvent(t *testing.T) { }, { name: "with array body", - logRecordFn: func() pdata.LogRecord { - logRecord := pdata.NewLogRecord() - attVal := pdata.NewValueSlice() + logRecordFn: func() plog.LogRecord { + logRecord := plog.NewLogRecord() + attVal := pcommon.NewValueSlice() attArray := attVal.SliceVal() attArray.AppendEmpty().SetStringVal("foo") attVal.CopyTo(logRecord.Body()) @@ -361,7 +362,7 @@ func Test_mapLogRecordToSplunkEvent(t *testing.T) { logRecord.SetTimestamp(ts) return logRecord }, - logResourceFn: pdata.NewResource, + logResourceFn: pcommon.NewResource, configDataFn: func() *Config { config := createDefaultConfig().(*Config) config.Source = "source" @@ -375,14 +376,14 @@ func Test_mapLogRecordToSplunkEvent(t *testing.T) { }, { name: "log resource attribute", - logRecordFn: func() pdata.LogRecord { - logRecord := pdata.NewLogRecord() + logRecordFn: func() plog.LogRecord { + logRecord := plog.NewLogRecord() logRecord.Body().SetStringVal("mylog") logRecord.SetTimestamp(ts) return logRecord }, - logResourceFn: func() pdata.Resource { - resource := pdata.NewResource() + logResourceFn: func() pcommon.Resource { + resource := pcommon.NewResource() resource.Attributes().InsertString("resourceAttr1", "some_string") resource.Attributes().InsertString(splunk.DefaultSourceTypeLabel, "myapp-type-from-resource-attr") resource.Attributes().InsertString(splunk.DefaultIndexLabel, "index-resource") @@ -405,19 +406,19 @@ func Test_mapLogRecordToSplunkEvent(t *testing.T) { }, { name: "with severity", - logRecordFn: func() pdata.LogRecord { - logRecord := pdata.NewLogRecord() + logRecordFn: func() plog.LogRecord { + logRecord := plog.NewLogRecord() logRecord.Body().SetStringVal("mylog") logRecord.Attributes().InsertString(splunk.DefaultSourceLabel, "myapp") logRecord.Attributes().InsertString(splunk.DefaultSourceTypeLabel, "myapp-type") logRecord.Attributes().InsertString(conventions.AttributeHostName, "myhost") logRecord.Attributes().InsertString("custom", "custom") logRecord.SetSeverityText("DEBUG") - 
logRecord.SetSeverityNumber(pdata.SeverityNumberDEBUG) + logRecord.SetSeverityNumber(plog.SeverityNumberDEBUG) logRecord.SetTimestamp(ts) return logRecord }, - logResourceFn: pdata.NewResource, + logResourceFn: pcommon.NewResource, configDataFn: func() *Config { config := createDefaultConfig().(*Config) config.Source = "source" @@ -425,7 +426,7 @@ func Test_mapLogRecordToSplunkEvent(t *testing.T) { return config }, wantSplunkEvents: []*splunk.Event{ - commonLogSplunkEvent("mylog", ts, map[string]interface{}{"custom": "custom", "otel.log.severity.number": pdata.SeverityNumberDEBUG, "otel.log.severity.text": "DEBUG"}, + commonLogSplunkEvent("mylog", ts, map[string]interface{}{"custom": "custom", "otel.log.severity.number": plog.SeverityNumberDEBUG, "otel.log.severity.text": "DEBUG"}, "myhost", "myapp", "myapp-type"), }, }, @@ -443,7 +444,7 @@ func Test_mapLogRecordToSplunkEvent(t *testing.T) { func commonLogSplunkEvent( event interface{}, - ts pdata.Timestamp, + ts pcommon.Timestamp, fields map[string]interface{}, host string, source string, @@ -460,7 +461,7 @@ func commonLogSplunkEvent( } func Test_emptyLogRecord(t *testing.T) { - event := mapLogRecordToSplunkEvent(pdata.NewResource(), pdata.NewLogRecord(), &Config{}, zap.NewNop()) + event := mapLogRecordToSplunkEvent(pcommon.NewResource(), plog.NewLogRecord(), &Config{}, zap.NewNop()) assert.Nil(t, event.Time) assert.Equal(t, event.Host, "unknown") assert.Zero(t, event.Source) diff --git a/exporter/splunkhecexporter/metricdata_to_splunk.go b/exporter/splunkhecexporter/metricdata_to_splunk.go index e17c20bf75ea..3d47b5750040 100644 --- a/exporter/splunkhecexporter/metricdata_to_splunk.go +++ b/exporter/splunkhecexporter/metricdata_to_splunk.go @@ -18,7 +18,8 @@ import ( "math" "strconv" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk" @@ -58,7 +59,7 @@ func sanitizeFloat(value float64) interface{} { return value } -func mapMetricToSplunkEvent(res pdata.Resource, m pdata.Metric, config *Config, logger *zap.Logger) []*splunk.Event { +func mapMetricToSplunkEvent(res pcommon.Resource, m pmetric.Metric, config *Config, logger *zap.Logger) []*splunk.Event { sourceKey := config.HecToOtelAttrs.Source sourceTypeKey := config.HecToOtelAttrs.SourceType indexKey := config.HecToOtelAttrs.Index @@ -69,7 +70,7 @@ func mapMetricToSplunkEvent(res pdata.Resource, m pdata.Metric, config *Config, index := config.Index commonFields := map[string]interface{}{} - res.Attributes().Range(func(k string, v pdata.Value) bool { + res.Attributes().Range(func(k string, v pcommon.Value) bool { switch k { case hostKey: host = v.StringVal() @@ -88,7 +89,7 @@ func mapMetricToSplunkEvent(res pdata.Resource, m pdata.Metric, config *Config, }) metricFieldName := splunkMetricValue + ":" + m.Name() switch m.DataType() { - case pdata.MetricDataTypeGauge: + case pmetric.MetricDataTypeGauge: pts := m.Gauge().DataPoints() splunkMetrics := make([]*splunk.Event, pts.Len()) @@ -97,16 +98,16 @@ func mapMetricToSplunkEvent(res pdata.Resource, m pdata.Metric, config *Config, fields := cloneMap(commonFields) populateAttributes(fields, dataPt.Attributes()) switch dataPt.ValueType() { - case pdata.MetricValueTypeInt: + case pmetric.MetricValueTypeInt: fields[metricFieldName] = dataPt.IntVal() - case pdata.MetricValueTypeDouble: + case pmetric.MetricValueTypeDouble: fields[metricFieldName] = 
sanitizeFloat(dataPt.DoubleVal()) } - fields[splunkMetricTypeKey] = pdata.MetricDataTypeGauge.String() + fields[splunkMetricTypeKey] = pmetric.MetricDataTypeGauge.String() splunkMetrics[gi] = createEvent(dataPt.Timestamp(), host, source, sourceType, index, fields) } return splunkMetrics - case pdata.MetricDataTypeHistogram: + case pmetric.MetricDataTypeHistogram: pts := m.Histogram().DataPoints() var splunkMetrics []*splunk.Event for gi := 0; gi < pts.Len(); gi++ { @@ -118,14 +119,14 @@ func mapMetricToSplunkEvent(res pdata.Resource, m pdata.Metric, config *Config, fields := cloneMap(commonFields) populateAttributes(fields, dataPt.Attributes()) fields[metricFieldName+sumSuffix] = dataPt.Sum() - fields[splunkMetricTypeKey] = pdata.MetricDataTypeHistogram.String() + fields[splunkMetricTypeKey] = pmetric.MetricDataTypeHistogram.String() splunkMetrics = append(splunkMetrics, createEvent(dataPt.Timestamp(), host, source, sourceType, index, fields)) } { fields := cloneMap(commonFields) populateAttributes(fields, dataPt.Attributes()) fields[metricFieldName+countSuffix] = dataPt.Count() - fields[splunkMetricTypeKey] = pdata.MetricDataTypeHistogram.String() + fields[splunkMetricTypeKey] = pmetric.MetricDataTypeHistogram.String() splunkMetrics = append(splunkMetrics, createEvent(dataPt.Timestamp(), host, source, sourceType, index, fields)) } // Spec says counts is optional but if present it must have one more @@ -141,7 +142,7 @@ func mapMetricToSplunkEvent(res pdata.Resource, m pdata.Metric, config *Config, fields["le"] = float64ToDimValue(bounds[bi]) value += counts[bi] fields[metricFieldName+bucketSuffix] = value - fields[splunkMetricTypeKey] = pdata.MetricDataTypeHistogram.String() + fields[splunkMetricTypeKey] = pmetric.MetricDataTypeHistogram.String() sm := createEvent(dataPt.Timestamp(), host, source, sourceType, index, fields) splunkMetrics = append(splunkMetrics, sm) } @@ -151,13 +152,13 @@ func mapMetricToSplunkEvent(res pdata.Resource, m pdata.Metric, config *Config, populateAttributes(fields, dataPt.Attributes()) fields["le"] = float64ToDimValue(math.Inf(1)) fields[metricFieldName+bucketSuffix] = value + counts[len(counts)-1] - fields[splunkMetricTypeKey] = pdata.MetricDataTypeHistogram.String() + fields[splunkMetricTypeKey] = pmetric.MetricDataTypeHistogram.String() sm := createEvent(dataPt.Timestamp(), host, source, sourceType, index, fields) splunkMetrics = append(splunkMetrics, sm) } } return splunkMetrics - case pdata.MetricDataTypeSum: + case pmetric.MetricDataTypeSum: pts := m.Sum().DataPoints() splunkMetrics := make([]*splunk.Event, pts.Len()) for gi := 0; gi < pts.Len(); gi++ { @@ -165,17 +166,17 @@ func mapMetricToSplunkEvent(res pdata.Resource, m pdata.Metric, config *Config, fields := cloneMap(commonFields) populateAttributes(fields, dataPt.Attributes()) switch dataPt.ValueType() { - case pdata.MetricValueTypeInt: + case pmetric.MetricValueTypeInt: fields[metricFieldName] = dataPt.IntVal() - case pdata.MetricValueTypeDouble: + case pmetric.MetricValueTypeDouble: fields[metricFieldName] = sanitizeFloat(dataPt.DoubleVal()) } - fields[splunkMetricTypeKey] = pdata.MetricDataTypeSum.String() + fields[splunkMetricTypeKey] = pmetric.MetricDataTypeSum.String() sm := createEvent(dataPt.Timestamp(), host, source, sourceType, index, fields) splunkMetrics[gi] = sm } return splunkMetrics - case pdata.MetricDataTypeSummary: + case pmetric.MetricDataTypeSummary: pts := m.Summary().DataPoints() var splunkMetrics []*splunk.Event for gi := 0; gi < pts.Len(); gi++ { @@ -185,7 +186,7 @@ func 
mapMetricToSplunkEvent(res pdata.Resource, m pdata.Metric, config *Config, fields := cloneMap(commonFields) populateAttributes(fields, dataPt.Attributes()) fields[metricFieldName+sumSuffix] = dataPt.Sum() - fields[splunkMetricTypeKey] = pdata.MetricDataTypeSummary.String() + fields[splunkMetricTypeKey] = pmetric.MetricDataTypeSummary.String() sm := createEvent(dataPt.Timestamp(), host, source, sourceType, index, fields) splunkMetrics = append(splunkMetrics, sm) } @@ -193,7 +194,7 @@ func mapMetricToSplunkEvent(res pdata.Resource, m pdata.Metric, config *Config, fields := cloneMap(commonFields) populateAttributes(fields, dataPt.Attributes()) fields[metricFieldName+countSuffix] = dataPt.Count() - fields[splunkMetricTypeKey] = pdata.MetricDataTypeSummary.String() + fields[splunkMetricTypeKey] = pmetric.MetricDataTypeSummary.String() sm := createEvent(dataPt.Timestamp(), host, source, sourceType, index, fields) splunkMetrics = append(splunkMetrics, sm) } @@ -205,13 +206,13 @@ func mapMetricToSplunkEvent(res pdata.Resource, m pdata.Metric, config *Config, dp := dataPt.QuantileValues().At(bi) fields["qt"] = float64ToDimValue(dp.Quantile()) fields[metricFieldName+"_"+strconv.FormatFloat(dp.Quantile(), 'f', -1, 64)] = sanitizeFloat(dp.Value()) - fields[splunkMetricTypeKey] = pdata.MetricDataTypeSummary.String() + fields[splunkMetricTypeKey] = pmetric.MetricDataTypeSummary.String() sm := createEvent(dataPt.Timestamp(), host, source, sourceType, index, fields) splunkMetrics = append(splunkMetrics, sm) } } return splunkMetrics - case pdata.MetricDataTypeNone: + case pmetric.MetricDataTypeNone: fallthrough default: logger.Warn( @@ -221,7 +222,7 @@ func mapMetricToSplunkEvent(res pdata.Resource, m pdata.Metric, config *Config, } } -func createEvent(timestamp pdata.Timestamp, host string, source string, sourceType string, index string, fields map[string]interface{}) *splunk.Event { +func createEvent(timestamp pcommon.Timestamp, host string, source string, sourceType string, index string, fields map[string]interface{}) *splunk.Event { return &splunk.Event{ Time: timestampToSecondsWithMillisecondPrecision(timestamp), Host: host, @@ -234,8 +235,8 @@ func createEvent(timestamp pdata.Timestamp, host string, source string, sourceTy } -func populateAttributes(fields map[string]interface{}, attributeMap pdata.Map) { - attributeMap.Range(func(k string, v pdata.Value) bool { +func populateAttributes(fields map[string]interface{}, attributeMap pcommon.Map) { + attributeMap.Range(func(k string, v pcommon.Value) bool { fields[k] = v.AsString() return true }) @@ -249,7 +250,7 @@ func cloneMap(fields map[string]interface{}) map[string]interface{} { return newFields } -func timestampToSecondsWithMillisecondPrecision(ts pdata.Timestamp) *float64 { +func timestampToSecondsWithMillisecondPrecision(ts pcommon.Timestamp) *float64 { if ts == 0 { // some telemetry sources send data with timestamps set to 0 by design, as their original target destinations // (i.e. before Open Telemetry) are setup with the know-how on how to consume them. 
In this case, diff --git a/exporter/splunkhecexporter/metricdata_to_splunk_test.go b/exporter/splunkhecexporter/metricdata_to_splunk_test.go index 3ca73f96769e..604d249de829 100644 --- a/exporter/splunkhecexporter/metricdata_to_splunk_test.go +++ b/exporter/splunkhecexporter/metricdata_to_splunk_test.go @@ -23,7 +23,8 @@ import ( "time" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk" @@ -33,7 +34,7 @@ func Test_metricDataToSplunk(t *testing.T) { unixSecs := int64(1574092046) unixNSecs := int64(11 * time.Millisecond) tsUnix := time.Unix(unixSecs, unixNSecs) - ts := pdata.NewTimestampFromTime(tsUnix) + ts := pcommon.NewTimestampFromTime(tsUnix) tsMSecs := timestampToSecondsWithMillisecondPrecision(ts) doubleVal := 1234.5678 @@ -44,20 +45,20 @@ func Test_metricDataToSplunk(t *testing.T) { tests := []struct { name string - resourceFn func() pdata.Resource - metricsDataFn func() pdata.Metric + resourceFn func() pcommon.Resource + metricsDataFn func() pmetric.Metric wantSplunkMetrics []*splunk.Event configFn func() *Config }{ { name: "nil_gauge_value", - resourceFn: func() pdata.Resource { + resourceFn: func() pcommon.Resource { return newMetricsWithResources() }, - metricsDataFn: func() pdata.Metric { - gauge := pdata.NewMetric() + metricsDataFn: func() pmetric.Metric { + gauge := pmetric.NewMetric() gauge.SetName("gauge_with_dims") - gauge.SetDataType(pdata.MetricDataTypeGauge) + gauge.SetDataType(pmetric.MetricDataTypeGauge) return gauge }, configFn: func() *Config { @@ -66,15 +67,15 @@ func Test_metricDataToSplunk(t *testing.T) { }, { name: "nan_gauge_value", - resourceFn: func() pdata.Resource { + resourceFn: func() pcommon.Resource { return newMetricsWithResources() }, - metricsDataFn: func() pdata.Metric { - gauge := pdata.NewMetric() + metricsDataFn: func() pmetric.Metric { + gauge := pmetric.NewMetric() gauge.SetName("gauge_with_dims") - gauge.SetDataType(pdata.MetricDataTypeGauge) + gauge.SetDataType(pmetric.MetricDataTypeGauge) dp := gauge.Gauge().DataPoints().AppendEmpty() - dp.SetTimestamp(pdata.NewTimestampFromTime(tsUnix)) + dp.SetTimestamp(pcommon.NewTimestampFromTime(tsUnix)) dp.SetDoubleVal(math.NaN()) return gauge }, @@ -87,15 +88,15 @@ func Test_metricDataToSplunk(t *testing.T) { }, { name: "+Inf_gauge_value", - resourceFn: func() pdata.Resource { + resourceFn: func() pcommon.Resource { return newMetricsWithResources() }, - metricsDataFn: func() pdata.Metric { - gauge := pdata.NewMetric() + metricsDataFn: func() pmetric.Metric { + gauge := pmetric.NewMetric() gauge.SetName("gauge_with_dims") - gauge.SetDataType(pdata.MetricDataTypeGauge) + gauge.SetDataType(pmetric.MetricDataTypeGauge) dp := gauge.Gauge().DataPoints().AppendEmpty() - dp.SetTimestamp(pdata.NewTimestampFromTime(tsUnix)) + dp.SetTimestamp(pcommon.NewTimestampFromTime(tsUnix)) dp.SetDoubleVal(math.Inf(1)) return gauge }, @@ -108,15 +109,15 @@ func Test_metricDataToSplunk(t *testing.T) { }, { name: "-Inf_gauge_value", - resourceFn: func() pdata.Resource { + resourceFn: func() pcommon.Resource { return newMetricsWithResources() }, - metricsDataFn: func() pdata.Metric { - gauge := pdata.NewMetric() + metricsDataFn: func() pmetric.Metric { + gauge := pmetric.NewMetric() gauge.SetName("gauge_with_dims") - gauge.SetDataType(pdata.MetricDataTypeGauge) + 
gauge.SetDataType(pmetric.MetricDataTypeGauge) dp := gauge.Gauge().DataPoints().AppendEmpty() - dp.SetTimestamp(pdata.NewTimestampFromTime(tsUnix)) + dp.SetTimestamp(pcommon.NewTimestampFromTime(tsUnix)) dp.SetDoubleVal(math.Inf(-1)) return gauge }, @@ -129,13 +130,13 @@ func Test_metricDataToSplunk(t *testing.T) { }, { name: "nil_histogram_value", - resourceFn: func() pdata.Resource { + resourceFn: func() pcommon.Resource { return newMetricsWithResources() }, - metricsDataFn: func() pdata.Metric { - histogram := pdata.NewMetric() + metricsDataFn: func() pmetric.Metric { + histogram := pmetric.NewMetric() histogram.SetName("histogram_with_dims") - histogram.SetDataType(pdata.MetricDataTypeHistogram) + histogram.SetDataType(pmetric.MetricDataTypeHistogram) return histogram }, configFn: func() *Config { @@ -144,13 +145,13 @@ func Test_metricDataToSplunk(t *testing.T) { }, { name: "nil_sum_value", - resourceFn: func() pdata.Resource { + resourceFn: func() pcommon.Resource { return newMetricsWithResources() }, - metricsDataFn: func() pdata.Metric { - sum := pdata.NewMetric() + metricsDataFn: func() pmetric.Metric { + sum := pmetric.NewMetric() sum.SetName("sum_with_dims") - sum.SetDataType(pdata.MetricDataTypeSum) + sum.SetDataType(pmetric.MetricDataTypeSum) return sum }, configFn: func() *Config { @@ -159,13 +160,13 @@ func Test_metricDataToSplunk(t *testing.T) { }, { name: "gauge_empty_data_point", - resourceFn: func() pdata.Resource { + resourceFn: func() pcommon.Resource { return newMetricsWithResources() }, - metricsDataFn: func() pdata.Metric { - gauge := pdata.NewMetric() + metricsDataFn: func() pmetric.Metric { + gauge := pmetric.NewMetric() gauge.SetName("gauge_with_dims") - gauge.SetDataType(pdata.MetricDataTypeGauge) + gauge.SetDataType(pmetric.MetricDataTypeGauge) gauge.Gauge().DataPoints().AppendEmpty() return gauge }, @@ -175,13 +176,13 @@ func Test_metricDataToSplunk(t *testing.T) { }, { name: "histogram_empty_data_point", - resourceFn: func() pdata.Resource { + resourceFn: func() pcommon.Resource { return newMetricsWithResources() }, - metricsDataFn: func() pdata.Metric { - histogram := pdata.NewMetric() + metricsDataFn: func() pmetric.Metric { + histogram := pmetric.NewMetric() histogram.SetName("histogram_with_dims") - histogram.SetDataType(pdata.MetricDataTypeHistogram) + histogram.SetDataType(pmetric.MetricDataTypeHistogram) histogram.Histogram().DataPoints().AppendEmpty() return histogram }, @@ -191,13 +192,13 @@ func Test_metricDataToSplunk(t *testing.T) { }, { name: "sum_empty_data_point", - resourceFn: func() pdata.Resource { + resourceFn: func() pcommon.Resource { return newMetricsWithResources() }, - metricsDataFn: func() pdata.Metric { - sum := pdata.NewMetric() + metricsDataFn: func() pmetric.Metric { + sum := pmetric.NewMetric() sum.SetName("sum_with_dims") - sum.SetDataType(pdata.MetricDataTypeSum) + sum.SetDataType(pmetric.MetricDataTypeSum) sum.Sum().DataPoints().AppendEmpty() return sum }, @@ -207,8 +208,8 @@ func Test_metricDataToSplunk(t *testing.T) { }, { name: "int_gauge", - resourceFn: func() pdata.Resource { - res := pdata.NewResource() + resourceFn: func() pcommon.Resource { + res := pcommon.NewResource() res.Attributes().InsertString("com.splunk.source", "mysource") res.Attributes().InsertString("host.name", "myhost") res.Attributes().InsertString("com.splunk.sourcetype", "mysourcetype") @@ -217,14 +218,14 @@ func Test_metricDataToSplunk(t *testing.T) { res.Attributes().InsertString("k1", "v1") return res }, - metricsDataFn: func() pdata.Metric { - 
intGauge := pdata.NewMetric() + metricsDataFn: func() pmetric.Metric { + intGauge := pmetric.NewMetric() intGauge.SetName("gauge_int_with_dims") - intGauge.SetDataType(pdata.MetricDataTypeGauge) + intGauge.SetDataType(pmetric.MetricDataTypeGauge) intDataPt := intGauge.Gauge().DataPoints().AppendEmpty() intDataPt.SetIntVal(int64Val) - intDataPt.SetTimestamp(pdata.NewTimestampFromTime(tsUnix)) - intDataPt.SetTimestamp(pdata.NewTimestampFromTime(tsUnix)) + intDataPt.SetTimestamp(pcommon.NewTimestampFromTime(tsUnix)) + intDataPt.SetTimestamp(pcommon.NewTimestampFromTime(tsUnix)) return intGauge }, @@ -238,8 +239,8 @@ func Test_metricDataToSplunk(t *testing.T) { { name: "double_gauge", - resourceFn: func() pdata.Resource { - res := pdata.NewResource() + resourceFn: func() pcommon.Resource { + res := pcommon.NewResource() res.Attributes().InsertString("com.splunk.source", "mysource") res.Attributes().InsertString("host.name", "myhost") res.Attributes().InsertString("com.splunk.sourcetype", "mysourcetype") @@ -248,14 +249,14 @@ func Test_metricDataToSplunk(t *testing.T) { res.Attributes().InsertString("k1", "v1") return res }, - metricsDataFn: func() pdata.Metric { + metricsDataFn: func() pmetric.Metric { - doubleGauge := pdata.NewMetric() + doubleGauge := pmetric.NewMetric() doubleGauge.SetName("gauge_double_with_dims") - doubleGauge.SetDataType(pdata.MetricDataTypeGauge) + doubleGauge.SetDataType(pmetric.MetricDataTypeGauge) doubleDataPt := doubleGauge.Gauge().DataPoints().AppendEmpty() doubleDataPt.SetDoubleVal(doubleVal) - doubleDataPt.SetTimestamp(pdata.NewTimestampFromTime(tsUnix)) + doubleDataPt.SetTimestamp(pcommon.NewTimestampFromTime(tsUnix)) return doubleGauge }, @@ -269,19 +270,19 @@ func Test_metricDataToSplunk(t *testing.T) { { name: "histogram_no_upper_bound", - resourceFn: func() pdata.Resource { + resourceFn: func() pcommon.Resource { return newMetricsWithResources() }, - metricsDataFn: func() pdata.Metric { - histogram := pdata.NewMetric() + metricsDataFn: func() pmetric.Metric { + histogram := pmetric.NewMetric() histogram.SetName("double_histogram_with_dims") - histogram.SetDataType(pdata.MetricDataTypeHistogram) + histogram.SetDataType(pmetric.MetricDataTypeHistogram) histogramPt := histogram.Histogram().DataPoints().AppendEmpty() histogramPt.SetExplicitBounds(distributionBounds) histogramPt.SetBucketCounts([]uint64{4, 2, 3}) histogramPt.SetSum(23) histogramPt.SetCount(7) - histogramPt.SetTimestamp(pdata.NewTimestampFromTime(tsUnix)) + histogramPt.SetTimestamp(pcommon.NewTimestampFromTime(tsUnix)) return histogram }, configFn: func() *Config { @@ -290,19 +291,19 @@ func Test_metricDataToSplunk(t *testing.T) { }, { name: "histogram", - resourceFn: func() pdata.Resource { + resourceFn: func() pcommon.Resource { return newMetricsWithResources() }, - metricsDataFn: func() pdata.Metric { - histogram := pdata.NewMetric() + metricsDataFn: func() pmetric.Metric { + histogram := pmetric.NewMetric() histogram.SetName("double_histogram_with_dims") - histogram.SetDataType(pdata.MetricDataTypeHistogram) + histogram.SetDataType(pmetric.MetricDataTypeHistogram) histogramPt := histogram.Histogram().DataPoints().AppendEmpty() histogramPt.SetExplicitBounds(distributionBounds) histogramPt.SetBucketCounts(distributionCounts) histogramPt.SetSum(23) histogramPt.SetCount(7) - histogramPt.SetTimestamp(pdata.NewTimestampFromTime(tsUnix)) + histogramPt.SetTimestamp(pcommon.NewTimestampFromTime(tsUnix)) return histogram }, wantSplunkMetrics: []*splunk.Event{ @@ -396,13 +397,13 @@ func 
Test_metricDataToSplunk(t *testing.T) { { name: "int_sum", - resourceFn: func() pdata.Resource { + resourceFn: func() pcommon.Resource { return newMetricsWithResources() }, - metricsDataFn: func() pdata.Metric { - intSum := pdata.NewMetric() + metricsDataFn: func() pmetric.Metric { + intSum := pmetric.NewMetric() intSum.SetName("int_sum_with_dims") - intSum.SetDataType(pdata.MetricDataTypeSum) + intSum.SetDataType(pmetric.MetricDataTypeSum) intDataPt := intSum.Sum().DataPoints().AppendEmpty() intDataPt.SetTimestamp(ts) intDataPt.SetIntVal(62) @@ -429,13 +430,13 @@ func Test_metricDataToSplunk(t *testing.T) { }, { name: "double_sum", - resourceFn: func() pdata.Resource { + resourceFn: func() pcommon.Resource { return newMetricsWithResources() }, - metricsDataFn: func() pdata.Metric { - doubleSum := pdata.NewMetric() + metricsDataFn: func() pmetric.Metric { + doubleSum := pmetric.NewMetric() doubleSum.SetName("double_sum_with_dims") - doubleSum.SetDataType(pdata.MetricDataTypeSum) + doubleSum.SetDataType(pmetric.MetricDataTypeSum) doubleDataPt := doubleSum.Sum().DataPoints().AppendEmpty() doubleDataPt.SetTimestamp(ts) doubleDataPt.SetDoubleVal(62) @@ -462,13 +463,13 @@ func Test_metricDataToSplunk(t *testing.T) { }, { name: "summary", - resourceFn: func() pdata.Resource { + resourceFn: func() pcommon.Resource { return newMetricsWithResources() }, - metricsDataFn: func() pdata.Metric { - summary := pdata.NewMetric() + metricsDataFn: func() pmetric.Metric { + summary := pmetric.NewMetric() summary.SetName("summary") - summary.SetDataType(pdata.MetricDataTypeSummary) + summary.SetDataType(pmetric.MetricDataTypeSummary) summaryPt := summary.Summary().DataPoints().AppendEmpty() summaryPt.SetTimestamp(ts) summaryPt.SetStartTimestamp(ts) @@ -544,13 +545,13 @@ func Test_metricDataToSplunk(t *testing.T) { }, { name: "unknown_type", - resourceFn: func() pdata.Resource { + resourceFn: func() pcommon.Resource { return newMetricsWithResources() }, - metricsDataFn: func() pdata.Metric { - metric := pdata.NewMetric() + metricsDataFn: func() pmetric.Metric { + metric := pmetric.NewMetric() metric.SetName("unknown_with_dims") - metric.SetDataType(pdata.MetricDataTypeNone) + metric.SetDataType(pmetric.MetricDataTypeNone) return metric }, wantSplunkMetrics: nil, @@ -561,8 +562,8 @@ func Test_metricDataToSplunk(t *testing.T) { { name: "custom_config_mapping", - resourceFn: func() pdata.Resource { - res := pdata.NewResource() + resourceFn: func() pcommon.Resource { + res := pcommon.NewResource() res.Attributes().InsertString("mysource", "mysource2") res.Attributes().InsertString("myhost", "myhost2") res.Attributes().InsertString("mysourcetype", "mysourcetype2") @@ -571,13 +572,13 @@ func Test_metricDataToSplunk(t *testing.T) { res.Attributes().InsertString("k1", "v1") return res }, - metricsDataFn: func() pdata.Metric { - doubleGauge := pdata.NewMetric() + metricsDataFn: func() pmetric.Metric { + doubleGauge := pmetric.NewMetric() doubleGauge.SetName("gauge_double_with_dims") - doubleGauge.SetDataType(pdata.MetricDataTypeGauge) + doubleGauge.SetDataType(pmetric.MetricDataTypeGauge) doubleDataPt := doubleGauge.Gauge().DataPoints().AppendEmpty() doubleDataPt.SetDoubleVal(doubleVal) - doubleDataPt.SetTimestamp(pdata.NewTimestampFromTime(tsUnix)) + doubleDataPt.SetTimestamp(pcommon.NewTimestampFromTime(tsUnix)) return doubleGauge }, @@ -639,27 +640,27 @@ func commonSplunkMetric( } func TestTimestampFormat(t *testing.T) { - ts := pdata.Timestamp(32001000345) + ts := pcommon.Timestamp(32001000345) assert.Equal(t, 
32.001, *timestampToSecondsWithMillisecondPrecision(ts)) } func TestTimestampFormatRounding(t *testing.T) { - ts := pdata.Timestamp(32001999345) + ts := pcommon.Timestamp(32001999345) assert.Equal(t, 32.002, *timestampToSecondsWithMillisecondPrecision(ts)) } func TestTimestampFormatRoundingWithNanos(t *testing.T) { - ts := pdata.Timestamp(9999999999991500001) + ts := pcommon.Timestamp(9999999999991500001) assert.Equal(t, 9999999999.992, *timestampToSecondsWithMillisecondPrecision(ts)) } func TestNilTimeWhenTimestampIsZero(t *testing.T) { - ts := pdata.Timestamp(0) + ts := pcommon.Timestamp(0) assert.Nil(t, timestampToSecondsWithMillisecondPrecision(ts)) } -func newMetricsWithResources() pdata.Resource { - res := pdata.NewResource() +func newMetricsWithResources() pcommon.Resource { + res := pcommon.NewResource() res.Attributes().InsertString("k0", "v0") res.Attributes().InsertString("k1", "v1") return res diff --git a/exporter/splunkhecexporter/tracedata_to_splunk.go b/exporter/splunkhecexporter/tracedata_to_splunk.go index 1a569d18a29a..d64b2ddf13f5 100644 --- a/exporter/splunkhecexporter/tracedata_to_splunk.go +++ b/exporter/splunkhecexporter/tracedata_to_splunk.go @@ -15,7 +15,8 @@ package splunkhecexporter // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/splunkhecexporter" import ( - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk" @@ -25,7 +26,7 @@ import ( type hecEvent struct { Attributes map[string]interface{} `json:"attributes,omitempty"` Name string `json:"name"` - Timestamp pdata.Timestamp `json:"timestamp"` + Timestamp pcommon.Timestamp `json:"timestamp"` } // hecLink is a data structure holding a span link to export explicitly to Splunk HEC. @@ -33,7 +34,7 @@ type hecLink struct { Attributes map[string]interface{} `json:"attributes,omitempty"` TraceID string `json:"trace_id"` SpanID string `json:"span_id"` - TraceState pdata.TraceState `json:"trace_state"` + TraceState ptrace.TraceState `json:"trace_state"` } // hecSpanStatus is a data structure holding the status of a span to export explicitly to Splunk HEC. 
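Note (not part of the patch): the hunks above and below all apply the same mechanical migration — the monolithic go.opentelemetry.io/collector/model/pdata package is replaced by the signal-specific pdata/pcommon, pdata/ptrace, pdata/pmetric, and pdata/plog packages, with shared types such as Timestamp, Map, and Value moving to pcommon. The standalone Go sketch below is illustrative only (package name and attribute values are made up); it shows the new import layout and the attribute-iteration pattern these exporters now use.

package main

import (
	"fmt"
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/ptrace"
)

func main() {
	// Build a minimal trace the same way the updated tests do.
	traces := ptrace.NewTraces()
	rs := traces.ResourceSpans().AppendEmpty()
	rs.Resource().Attributes().InsertString("host.name", "myhost")

	span := rs.ScopeSpans().AppendEmpty().Spans().AppendEmpty()
	span.SetName("myspan")
	span.SetStartTimestamp(pcommon.NewTimestampFromTime(time.Now()))
	span.Attributes().InsertString("foo", "bar")

	// Attribute callbacks now take pcommon.Value instead of pdata.Value.
	span.Attributes().Range(func(k string, v pcommon.Value) bool {
		fmt.Printf("%s=%s\n", k, v.StringVal())
		return true
	})
}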
@@ -49,15 +50,15 @@ type hecSpan struct { ParentSpan string `json:"parent_span_id"` Name string `json:"name"` Attributes map[string]interface{} `json:"attributes,omitempty"` - EndTime pdata.Timestamp `json:"end_time"` + EndTime pcommon.Timestamp `json:"end_time"` Kind string `json:"kind"` Status hecSpanStatus `json:"status,omitempty"` - StartTime pdata.Timestamp `json:"start_time"` + StartTime pcommon.Timestamp `json:"start_time"` Events []hecEvent `json:"events,omitempty"` Links []hecLink `json:"links,omitempty"` } -func mapSpanToSplunkEvent(resource pdata.Resource, span pdata.Span, config *Config, logger *zap.Logger) *splunk.Event { +func mapSpanToSplunkEvent(resource pcommon.Resource, span ptrace.Span, config *Config, logger *zap.Logger) *splunk.Event { sourceKey := config.HecToOtelAttrs.Source sourceTypeKey := config.HecToOtelAttrs.SourceType indexKey := config.HecToOtelAttrs.Index @@ -68,7 +69,7 @@ func mapSpanToSplunkEvent(resource pdata.Resource, span pdata.Span, config *Conf sourceType := config.SourceType index := config.Index commonFields := map[string]interface{}{} - resource.Attributes().Range(func(k string, v pdata.Value) bool { + resource.Attributes().Range(func(k string, v pcommon.Value) bool { switch k { case hostKey: host = v.StringVal() @@ -99,9 +100,9 @@ func mapSpanToSplunkEvent(resource pdata.Resource, span pdata.Span, config *Conf return se } -func toHecSpan(logger *zap.Logger, span pdata.Span) hecSpan { +func toHecSpan(logger *zap.Logger, span ptrace.Span) hecSpan { attributes := map[string]interface{}{} - span.Attributes().Range(func(k string, v pdata.Value) bool { + span.Attributes().Range(func(k string, v pcommon.Value) bool { attributes[k] = convertAttributeValue(v, logger) return true }) @@ -110,7 +111,7 @@ func toHecSpan(logger *zap.Logger, span pdata.Span) hecSpan { for i := 0; i < span.Links().Len(); i++ { link := span.Links().At(i) linkAttributes := map[string]interface{}{} - link.Attributes().Range(func(k string, v pdata.Value) bool { + link.Attributes().Range(func(k string, v pcommon.Value) bool { linkAttributes[k] = convertAttributeValue(v, logger) return true }) @@ -125,7 +126,7 @@ func toHecSpan(logger *zap.Logger, span pdata.Span) hecSpan { for i := 0; i < span.Events().Len(); i++ { event := span.Events().At(i) eventAttributes := map[string]interface{}{} - event.Attributes().Range(func(k string, v pdata.Value) bool { + event.Attributes().Range(func(k string, v pcommon.Value) bool { eventAttributes[k] = convertAttributeValue(v, logger) return true }) diff --git a/exporter/splunkhecexporter/tracedata_to_splunk_test.go b/exporter/splunkhecexporter/tracedata_to_splunk_test.go index 81bbc58d40f8..2ec26ccea042 100644 --- a/exporter/splunkhecexporter/tracedata_to_splunk_test.go +++ b/exporter/splunkhecexporter/tracedata_to_splunk_test.go @@ -20,7 +20,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk" @@ -28,18 +29,18 @@ import ( func Test_traceDataToSplunk(t *testing.T) { logger := zap.NewNop() - ts := pdata.Timestamp(123) + ts := pcommon.Timestamp(123) tests := []struct { name string - traceDataFn func() pdata.Traces + traceDataFn func() ptrace.Traces wantSplunkEvent *splunk.Event configFn func() *Config }{ { name: "valid", - traceDataFn: func() pdata.Traces { - 
traces := pdata.NewTraces() + traceDataFn: func() ptrace.Traces { + traces := ptrace.NewTraces() rs := traces.ResourceSpans().AppendEmpty() rs.Resource().Attributes().InsertString("com.splunk.source", "myservice") rs.Resource().Attributes().InsertString("host.name", "myhost") @@ -56,8 +57,8 @@ func Test_traceDataToSplunk(t *testing.T) { }, { name: "custom_config", - traceDataFn: func() pdata.Traces { - traces := pdata.NewTraces() + traceDataFn: func() ptrace.Traces { + traces := ptrace.NewTraces() rs := traces.ResourceSpans().AppendEmpty() rs.Resource().Attributes().InsertString("mysource", "myservice") rs.Resource().Attributes().InsertString("myhost", "myhost") @@ -98,7 +99,7 @@ func Test_traceDataToSplunk(t *testing.T) { } } -func initSpan(name string, ts *pdata.Timestamp, span pdata.Span) { +func initSpan(name string, ts *pcommon.Timestamp, span ptrace.Span) { span.Attributes().InsertString("foo", "bar") span.SetName(name) if ts != nil { @@ -109,14 +110,14 @@ func initSpan(name string, ts *pdata.Timestamp, span pdata.Span) { bytes, _ := hex.DecodeString("12345678") var traceID [16]byte copy(traceID[:], bytes) - spanLink.SetTraceID(pdata.NewTraceID(traceID)) + spanLink.SetTraceID(pcommon.NewTraceID(traceID)) bytes, _ = hex.DecodeString("1234") var spanID [8]byte copy(spanID[:], bytes) - spanLink.SetSpanID(pdata.NewSpanID(spanID)) + spanLink.SetSpanID(pcommon.NewSpanID(spanID)) spanLink.Attributes().InsertInt("foo", 1) spanLink.Attributes().InsertBool("bar", false) - foobarContents := pdata.NewValueSlice() + foobarContents := pcommon.NewValueSlice() foobarContents.SliceVal().AppendEmpty().SetStringVal("a") foobarContents.SliceVal().AppendEmpty().SetStringVal("b") spanLink.Attributes().Insert("foobar", foobarContents) @@ -131,7 +132,7 @@ func initSpan(name string, ts *pdata.Timestamp, span pdata.Span) { func commonSplunkEvent( name string, - ts pdata.Timestamp, + ts pcommon.Timestamp, ) *splunk.Event { return &splunk.Event{ Time: timestampToSecondsWithMillisecondPrecision(ts), diff --git a/exporter/stackdriverexporter/go.mod b/exporter/stackdriverexporter/go.mod index 83193514ddff..e39feb5c54ad 100644 --- a/exporter/stackdriverexporter/go.mod +++ b/exporter/stackdriverexporter/go.mod @@ -5,7 +5,7 @@ go 1.17 require ( github.com/open-telemetry/opentelemetry-collector-contrib/exporter/googlecloudexporter v0.48.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d go.uber.org/zap v1.21.0 ) @@ -17,7 +17,7 @@ require ( github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/collector v0.26.1-0.20220307211504-dc45061a44f9 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/trace v1.3.0 // indirect github.com/aws/aws-sdk-go v1.43.32 // indirect - github.com/cenkalti/backoff/v4 v4.1.2 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/census-instrumentation/opencensus-proto v0.3.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/go-logr/logr v1.2.3 // indirect @@ -29,19 +29,19 @@ require ( github.com/googleapis/gax-go/v2 v2.2.0 // indirect github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect 
github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.48.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus v0.48.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opencensus.io v0.23.0 // indirect - go.opentelemetry.io/collector/model v0.48.0 // indirect + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d // indirect + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect - go.opentelemetry.io/otel/sdk v1.6.1 // indirect + go.opentelemetry.io/otel/sdk v1.6.3 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect @@ -64,3 +64,5 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/googl replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus => ../../pkg/translator/opencensus replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal => ../../internal/coreinternal + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/exporter/stackdriverexporter/go.sum b/exporter/stackdriverexporter/go.sum index 55a846a5b0e5..30a6b908a272 100644 --- a/exporter/stackdriverexporter/go.sum +++ b/exporter/stackdriverexporter/go.sum @@ -57,7 +57,7 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9 cloud.google.com/go/trace v1.0.0 h1:laKx2y7IWMjguCe5zZx6n7qLtREk4kyE69SXVC0VSN8= cloud.google.com/go/trace v1.0.0/go.mod h1:4iErSByzxkyHWzzlAj63/Gmjz0NH1ASqhJguHpGcr6A= code.cloudfoundry.org/bytefmt v0.0.0-20190710193110-1eb035ffe2b6/go.mod h1:wN/zk7mhREp/oviagqUXY3EwuHhWyOvAdsn5Y4CzOrc= -contrib.go.opencensus.io/exporter/prometheus v0.4.0/go.mod h1:o7cosnyfuPVK0tB8q0QmaQNhGnptITnPQB+z1+qeFB0= +contrib.go.opencensus.io/exporter/prometheus v0.4.1/go.mod h1:t9wvfitlUjGXG2IXAZsuFq26mDGid/JwCEXp+gTG/9U= contrib.go.opencensus.io/exporter/stackdriver v0.13.11 h1:YzmWJ2OT2K3ouXyMm5FmFQPoDs5TfLjx6Xn5x5CLN0I= contrib.go.opencensus.io/exporter/stackdriver v0.13.11/go.mod h1:I5htMbyta491eUxufwwZPQdcKvvgzMB4O9ni41YnIM8= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= @@ -99,8 +99,8 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= -github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod 
h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.3.0 h1:t/LhUZLVitR1Ow2YOnduCsavhwFUklBMoGVYUCqmCqk= github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -153,10 +153,12 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.1/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -254,7 +256,6 @@ github.com/googleapis/gax-go/v2 v2.2.0 h1:s7jOdKSaksJVOxE0Y/S32otcfiP+UQ0cL8/GTK github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= github.com/googleinterns/cloud-operations-api-mock v0.0.0-20200709193332-a1e58c29bdd3 h1:eHv/jVY/JNop1xg2J9cBb4EzyMpWZoNCP1BslSAIkOI= github.com/googleinterns/cloud-operations-api-mock v0.0.0-20200709193332-a1e58c29bdd3/go.mod h1:h/KNeRx7oYU4SpA4SoY7W2/NxDKEEVuwA6j9A27L4OI= -github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -302,8 +303,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.14.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod 
h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= @@ -373,6 +374,7 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -383,6 +385,7 @@ github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB8 github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.28.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.33.0/go.mod h1:gB3sOl7P0TvJabZpLY5uQMpUqRCPPCyRLCZYc7JZTNE= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= @@ -402,12 +405,11 @@ github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFo github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/sanity-io/litter v1.2.0/go.mod h1:JF6pZUFgu2Q0sBZ+HSV35P8TVPI1TTzEwyu9FXAw2W4= github.com/schollz/progressbar/v2 v2.13.2/go.mod h1:6YZjqdthH6SCZKv2rqGryrxPtfmRB/DWZxSMfCXPyD8= -github.com/shirou/gopsutil/v3 v3.22.2/go.mod h1:WapW1AOOPlHyXr+yOyw3uYx36enocrtSoSBy0L5vUHY= +github.com/shirou/gopsutil/v3 v3.22.3/go.mod h1:D01hZJ4pVHPpCTZ3m3T2+wDF2YAGfd+H4ifUguaQzHM= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= @@ -423,8 +425,8 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= github.com/stretchr/testify v1.7.1/go.mod 
h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/tklauser/go-sysconf v0.3.9/go.mod h1:11DU/5sG7UexIrp/O6g35hrWzu0JxlwQ3LSFUzyeuhs= -github.com/tklauser/numcpus v0.3.0/go.mod h1:yFGUr7TUHQRAhyqBcEg0Ge34zDBAsIvJJcyE6boqnA8= +github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk= +github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -439,11 +441,14 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= go.opentelemetry.io/collector/model v0.46.0/go.mod h1:uyiyyq8lV45zrJ94MnLip26sorfNLP6J9XmOvaEmy7w= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0/go.mod h1:SY9qHHUES6W3oZnO1H2W8NvsSovIoXRg/A1AH9px8+I= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.24.0/go.mod h1:7W3JSDYTtH3qKKHrS1fMiwLtK7iZFLPq1+7htfspX/E= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.26.0/go.mod h1:4vatbW3QwS11DK0H0SB7FR31/VbthXcYorswdkVXdyg= @@ -462,7 +467,7 @@ go.opentelemetry.io/otel v1.6.0/go.mod h1:bfJD2DZVw0LBxghOTlgnlI0CV3hLDu9XF/QKOU go.opentelemetry.io/otel v1.6.1/go.mod h1:blzUabWHkX6LJewxvadmzafgh/wnvBSDBdOuwkAtrWQ= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= -go.opentelemetry.io/otel/exporters/prometheus v0.28.0/go.mod h1:nN2uGmk/rLmcbPTaZakIMqYH2Q0T8V1sOnKOHe/HLH0= +go.opentelemetry.io/otel/exporters/prometheus v0.29.0/go.mod h1:Er2VVJQZbHysogooLNchdZ3MLYoI7+d15mHmrRlRJCU= go.opentelemetry.io/otel/internal/metric v0.23.0/go.mod h1:z+RPiDJe30YnCrOhFGivwBS+DU1JU/PiLKkk4re2DNY= go.opentelemetry.io/otel/internal/metric v0.24.0/go.mod h1:PSkQG+KuApZjBpC6ea6082ZrWUUy/w132tJ/LOU3TXk= go.opentelemetry.io/otel/internal/metric v0.27.0/go.mod 
h1:n1CVxRqKqYZtqyTh9U/onvKapPGv7y/rpyOTI+LFNzw= @@ -475,10 +480,10 @@ go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ go.opentelemetry.io/otel/sdk v1.0.1/go.mod h1:HrdXne+BiwsOHYYkBE5ysIcv2bvdZstxzmCQhxTcZkI= go.opentelemetry.io/otel/sdk v1.3.0/go.mod h1:rIo4suHNhQwBIPg9axF8V9CA72Wz2mKF1teNrup8yzs= go.opentelemetry.io/otel/sdk v1.4.1/go.mod h1:NBwHDgDIBYjwK2WNu1OPgsIc2IJzmBXNnvIJxJc8BpE= -go.opentelemetry.io/otel/sdk v1.6.0/go.mod h1:PjLRUfDsoPy0zl7yrDGSUqjj43tL7rEtFdCEiGlxXRM= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= go.opentelemetry.io/otel/sdk v1.6.1/go.mod h1:IVYrddmFZ+eJqu2k38qD3WezFR2pymCzm8tdxyh3R4E= -go.opentelemetry.io/otel/sdk/metric v0.28.0/go.mod h1:DqJmT0ovBgoW6TJ8CAQyTnwxZPIp3KWtCiDDZ1uHAzU= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= +go.opentelemetry.io/otel/sdk v1.6.3/go.mod h1:A4iWF7HTXa+GWL/AaqESz28VuSBIcZ+0CV+IzJ5NMiQ= +go.opentelemetry.io/otel/sdk/metric v0.29.0/go.mod h1:IFkFNKI8Gq8zBdqOKdODCL9+LInBZLXaGpqSIKphNuU= go.opentelemetry.io/otel/trace v1.0.0-RC3/go.mod h1:VUt2TUYd8S2/ZRX09ZDFZQwn2RqfMB5MzO17jBojGxo= go.opentelemetry.io/otel/trace v1.0.0/go.mod h1:PXTWqayeFUlJV1YDNhsJYB184+IvAH814St6o6ajzIs= go.opentelemetry.io/otel/trace v1.0.1/go.mod h1:5g4i4fKLaX2BQpSBsxw8YYcgKpMMSW3x7ZTuYBr3sUk= @@ -681,7 +686,6 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= diff --git a/exporter/sumologicexporter/carbon_formatter.go b/exporter/sumologicexporter/carbon_formatter.go index 7ae86c33119a..45b3a1e3fc9f 100644 --- a/exporter/sumologicexporter/carbon_formatter.go +++ b/exporter/sumologicexporter/carbon_formatter.go @@ -18,7 +18,8 @@ import ( "fmt" "strings" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" ) // carbon2TagString returns all attributes as space spearated key=value pairs. @@ -37,7 +38,7 @@ func carbon2TagString(record metricPair) string { } returnValue := make([]string, 0, length) - record.attributes.Range(func(k string, v pdata.Value) bool { + record.attributes.Range(func(k string, v pcommon.Value) bool { if k == "name" || k == "unit" { k = fmt.Sprintf("_%s", k) } @@ -65,15 +66,15 @@ func sanitizeCarbonString(text string) string { // carbon2NumberRecord converts NumberDataPoint to carbon2 metric string // with additional information from metricPair. 
-func carbon2NumberRecord(record metricPair, dataPoint pdata.NumberDataPoint) string { +func carbon2NumberRecord(record metricPair, dataPoint pmetric.NumberDataPoint) string { switch dataPoint.ValueType() { - case pdata.MetricValueTypeDouble: + case pmetric.MetricValueTypeDouble: return fmt.Sprintf("%s %g %d", carbon2TagString(record), dataPoint.DoubleVal(), dataPoint.Timestamp()/1e9, ) - case pdata.MetricValueTypeInt: + case pmetric.MetricValueTypeInt: return fmt.Sprintf("%s %d %d", carbon2TagString(record), dataPoint.IntVal(), @@ -88,21 +89,21 @@ func carbon2Metric2String(record metricPair) string { var nextLines []string switch record.metric.DataType() { - case pdata.MetricDataTypeGauge: + case pmetric.MetricDataTypeGauge: dps := record.metric.Gauge().DataPoints() nextLines = make([]string, 0, dps.Len()) for i := 0; i < dps.Len(); i++ { nextLines = append(nextLines, carbon2NumberRecord(record, dps.At(i))) } - case pdata.MetricDataTypeSum: + case pmetric.MetricDataTypeSum: dps := record.metric.Sum().DataPoints() nextLines = make([]string, 0, dps.Len()) for i := 0; i < dps.Len(); i++ { nextLines = append(nextLines, carbon2NumberRecord(record, dps.At(i))) } // Skip complex metrics - case pdata.MetricDataTypeHistogram: - case pdata.MetricDataTypeSummary: + case pmetric.MetricDataTypeHistogram: + case pmetric.MetricDataTypeSummary: } return strings.Join(nextLines, "\n") diff --git a/exporter/sumologicexporter/exporter.go b/exporter/sumologicexporter/exporter.go index 7143362c5f8e..16a4af780145 100644 --- a/exporter/sumologicexporter/exporter.go +++ b/exporter/sumologicexporter/exporter.go @@ -22,7 +22,9 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer/consumererror" "go.opentelemetry.io/collector/exporter/exporterhelper" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/multierr" ) @@ -132,12 +134,12 @@ func (se *sumologicexporter) start(_ context.Context, host component.Host) (err // pushLogsData groups data with common metadata and sends them as separate batched requests. 
// It returns the number of unsent logs and an error which contains a list of dropped records // so they can be handled by OTC retry mechanism -func (se *sumologicexporter) pushLogsData(ctx context.Context, ld pdata.Logs) error { +func (se *sumologicexporter) pushLogsData(ctx context.Context, ld plog.Logs) error { var ( - currentMetadata = newFields(pdata.NewMap()) - previousMetadata = newFields(pdata.NewMap()) + currentMetadata = newFields(pcommon.NewMap()) + previousMetadata = newFields(pcommon.NewMap()) errs error - droppedRecords []pdata.LogRecord + droppedRecords []plog.LogRecord err error ) @@ -172,7 +174,7 @@ func (se *sumologicexporter) pushLogsData(ctx context.Context, ld pdata.Logs) er // copy resource attributes into logs attributes // log attributes have precedence over resource attributes - rl.Resource().Attributes().Range(func(k string, v pdata.Value) bool { + rl.Resource().Attributes().Range(func(k string, v pcommon.Value) bool { log.Attributes().Insert(k, v) return true }) @@ -181,7 +183,7 @@ func (se *sumologicexporter) pushLogsData(ctx context.Context, ld pdata.Logs) er // If metadata differs from currently buffered, flush the buffer if currentMetadata.string() != previousMetadata.string() && previousMetadata.string() != "" { - var dropped []pdata.LogRecord + var dropped []plog.LogRecord dropped, err = sdr.sendLogs(ctx, previousMetadata) if err != nil { errs = multierr.Append(errs, err) @@ -194,7 +196,7 @@ func (se *sumologicexporter) pushLogsData(ctx context.Context, ld pdata.Logs) er previousMetadata = currentMetadata // add log to the buffer - var dropped []pdata.LogRecord + var dropped []plog.LogRecord dropped, err = sdr.batchLog(ctx, log, previousMetadata) if err != nil { droppedRecords = append(droppedRecords, dropped...) @@ -213,7 +215,7 @@ func (se *sumologicexporter) pushLogsData(ctx context.Context, ld pdata.Logs) er if len(droppedRecords) > 0 { // Move all dropped records to Logs - droppedLogs := pdata.NewLogs() + droppedLogs := plog.NewLogs() rls = droppedLogs.ResourceLogs() ills := rls.AppendEmpty().ScopeLogs() logs := ills.AppendEmpty().LogRecords() @@ -232,13 +234,13 @@ func (se *sumologicexporter) pushLogsData(ctx context.Context, ld pdata.Logs) er // pushMetricsData groups data with common metadata and send them as separate batched requests // it returns number of unsent metrics and error which contains list of dropped records // so they can be handle by the OTC retry mechanism -func (se *sumologicexporter) pushMetricsData(ctx context.Context, md pdata.Metrics) error { +func (se *sumologicexporter) pushMetricsData(ctx context.Context, md pmetric.Metrics) error { var ( - currentMetadata = newFields(pdata.NewMap()) - previousMetadata = newFields(pdata.NewMap()) + currentMetadata = newFields(pcommon.NewMap()) + previousMetadata = newFields(pcommon.NewMap()) errs error droppedRecords []metricPair - attributes pdata.Map + attributes pcommon.Map ) c, err := newCompressor(se.config.CompressEncoding) @@ -311,7 +313,7 @@ func (se *sumologicexporter) pushMetricsData(ctx context.Context, md pdata.Metri if len(droppedRecords) > 0 { // Move all dropped records to Metrics - droppedMetrics := pdata.NewMetrics() + droppedMetrics := pmetric.NewMetrics() rms := droppedMetrics.ResourceMetrics() rms.EnsureCapacity(len(droppedRecords)) for _, record := range droppedRecords { diff --git a/exporter/sumologicexporter/exporter_test.go b/exporter/sumologicexporter/exporter_test.go index 8d4fdff1b27f..8768ffeea447 100644 --- a/exporter/sumologicexporter/exporter_test.go +++ 
b/exporter/sumologicexporter/exporter_test.go @@ -27,11 +27,11 @@ import ( "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/config/confighttp" "go.opentelemetry.io/collector/consumer/consumererror" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/plog" ) -func LogRecordsToLogs(records []pdata.LogRecord) pdata.Logs { - logs := pdata.NewLogs() +func LogRecordsToLogs(records []plog.LogRecord) plog.Logs { + logs := plog.NewLogs() logsSlice := logs.ResourceLogs().AppendEmpty().ScopeLogs().AppendEmpty().LogRecords() for _, record := range records { tgt := logsSlice.AppendEmpty() diff --git a/exporter/sumologicexporter/fields.go b/exporter/sumologicexporter/fields.go index 88ec10f9f2fc..46f40f19b8a8 100644 --- a/exporter/sumologicexporter/fields.go +++ b/exporter/sumologicexporter/fields.go @@ -19,16 +19,16 @@ import ( "sort" "strings" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" ) // fields represents metadata type fields struct { - orig pdata.Map + orig pcommon.Map replacer *strings.Replacer } -func newFields(attrMap pdata.Map) fields { +func newFields(attrMap pcommon.Map) fields { return fields{ orig: attrMap, replacer: strings.NewReplacer(",", "_", "=", ":", "\n", "_"), @@ -38,7 +38,7 @@ func newFields(attrMap pdata.Map) fields { // string returns fields as ordered key=value string with `, ` as separator func (f fields) string() string { returnValue := make([]string, 0, f.orig.Len()) - f.orig.Range(func(k string, v pdata.Value) bool { + f.orig.Range(func(k string, v pcommon.Value) bool { returnValue = append( returnValue, fmt.Sprintf( diff --git a/exporter/sumologicexporter/filter.go b/exporter/sumologicexporter/filter.go index 5197f6a420c7..bc980178941c 100644 --- a/exporter/sumologicexporter/filter.go +++ b/exporter/sumologicexporter/filter.go @@ -17,7 +17,7 @@ package sumologicexporter // import "github.com/open-telemetry/opentelemetry-col import ( "regexp" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" ) type filter struct { @@ -42,10 +42,10 @@ func newFilter(flds []string) (filter, error) { } // filterIn returns fields which match at least one of the filter regexes -func (f *filter) filterIn(attributes pdata.Map) fields { - returnValue := pdata.NewMap() +func (f *filter) filterIn(attributes pcommon.Map) fields { + returnValue := pcommon.NewMap() - attributes.Range(func(k string, v pdata.Value) bool { + attributes.Range(func(k string, v pcommon.Value) bool { for _, regex := range f.regexes { if regex.MatchString(k) { returnValue.Insert(k, v) @@ -59,10 +59,10 @@ func (f *filter) filterIn(attributes pdata.Map) fields { } // filterOut returns fields which don't match any of the filter regexes -func (f *filter) filterOut(attributes pdata.Map) fields { - returnValue := pdata.NewMap() +func (f *filter) filterOut(attributes pcommon.Map) fields { + returnValue := pcommon.NewMap() - attributes.Range(func(k string, v pdata.Value) bool { + attributes.Range(func(k string, v pcommon.Value) bool { for _, regex := range f.regexes { if regex.MatchString(k) { return true diff --git a/exporter/sumologicexporter/filter_test.go b/exporter/sumologicexporter/filter_test.go index a58088a3d740..ef35c861e221 100644 --- a/exporter/sumologicexporter/filter_test.go +++ b/exporter/sumologicexporter/filter_test.go @@ -19,11 +19,11 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - 
"go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" ) func TestGetMetadata(t *testing.T) { - attributes := pdata.NewMap() + attributes := pcommon.NewMap() attributes.InsertString("key3", "value3") attributes.InsertString("key1", "value1") attributes.InsertString("key2", "value2") @@ -45,7 +45,7 @@ func TestGetMetadata(t *testing.T) { } func TestFilterOutMetadata(t *testing.T) { - attributes := pdata.NewMap() + attributes := pcommon.NewMap() attributes.InsertString("key3", "value3") attributes.InsertString("key1", "value1") attributes.InsertString("key2", "value2") diff --git a/exporter/sumologicexporter/go.mod b/exporter/sumologicexporter/go.mod index 1a752cf61fa6..4b9034ac9501 100644 --- a/exporter/sumologicexporter/go.mod +++ b/exporter/sumologicexporter/go.mod @@ -4,13 +4,13 @@ go 1.17 require ( github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/multierr v1.8.0 ) require ( - github.com/cenkalti/backoff/v4 v4.1.2 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/felixge/httpsnoop v1.0.2 // indirect github.com/fsnotify/fsnotify v1.5.1 // indirect @@ -20,7 +20,7 @@ require ( github.com/golang/protobuf v1.5.2 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/klauspost/compress v1.15.1 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/pretty v0.3.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect @@ -29,7 +29,6 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rs/cors v1.8.2 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect @@ -37,13 +36,13 @@ require ( go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/zap v1.21.0 // indirect - golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect golang.org/x/text v0.3.7 // indirect - google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect google.golang.org/grpc v1.45.0 // indirect google.golang.org/protobuf v1.28.0 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/exporter/sumologicexporter/go.sum b/exporter/sumologicexporter/go.sum index 7240db255f3c..abfe2f38fdfa 100644 --- a/exporter/sumologicexporter/go.sum +++ b/exporter/sumologicexporter/go.sum @@ -1,7 +1,6 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= 
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= @@ -18,16 +17,14 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= -github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= @@ -39,7 +36,6 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.7.0/go.mod 
h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= @@ -124,8 +120,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= @@ -173,9 +169,6 @@ github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -191,10 +184,10 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0/go.mod h1:PFmBsWbldL1kiWZk9+0LBZz2brhByaGsvp6pRICMlPE= go.opentelemetry.io/otel 
v1.6.0/go.mod h1:bfJD2DZVw0LBxghOTlgnlI0CV3hLDu9XF/QKOUXMTQQ= @@ -204,7 +197,7 @@ go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1 go.opentelemetry.io/otel/metric v0.28.0/go.mod h1:TrzsfQAmQaB1PDcdhBauLMk7nyyg9hm+GoQq/ekE9Iw= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.0/go.mod h1:qs7BrU5cZ8dXQHBGxHMOxwME/27YH2qEp4/+tZLLwJE= go.opentelemetry.io/otel/trace v1.6.1/go.mod h1:RkFRM1m0puWIq10oxImnGEduNBzxiN7TXluRBtE+5j0= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= @@ -243,8 +236,8 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -269,13 +262,11 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -300,8 +291,6 @@ google.golang.org/genproto 
v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRn google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= @@ -311,7 +300,6 @@ google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8 google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -325,7 +313,6 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= diff --git a/exporter/sumologicexporter/graphite_formatter.go b/exporter/sumologicexporter/graphite_formatter.go index 4c79e8dfc6b1..ebc035237e89 100644 --- a/exporter/sumologicexporter/graphite_formatter.go +++ b/exporter/sumologicexporter/graphite_formatter.go @@ -20,7 +20,8 @@ import ( "strings" "time" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" ) type graphiteFormatter struct { @@ -78,19 +79,19 @@ func (gf *graphiteFormatter) format(f fields, metricName string) string { // numberRecord converts NumberDataPoint to graphite metric string // with additional information from fields -func (gf *graphiteFormatter) numberRecord(fs fields, name string, dataPoint pdata.NumberDataPoint) string { +func (gf *graphiteFormatter) numberRecord(fs fields, name string, dataPoint pmetric.NumberDataPoint) string { switch dataPoint.ValueType() { - case pdata.MetricValueTypeDouble: + case pmetric.MetricValueTypeDouble: return fmt.Sprintf("%s %g %d", gf.format(fs, name), dataPoint.DoubleVal(), - dataPoint.Timestamp()/pdata.Timestamp(time.Second), + 
dataPoint.Timestamp()/pcommon.Timestamp(time.Second), ) - case pdata.MetricValueTypeInt: + case pmetric.MetricValueTypeInt: return fmt.Sprintf("%s %d %d", gf.format(fs, name), dataPoint.IntVal(), - dataPoint.Timestamp()/pdata.Timestamp(time.Second), + dataPoint.Timestamp()/pcommon.Timestamp(time.Second), ) } return "" @@ -103,21 +104,21 @@ func (gf *graphiteFormatter) metric2String(record metricPair) string { name := record.metric.Name() switch record.metric.DataType() { - case pdata.MetricDataTypeGauge: + case pmetric.MetricDataTypeGauge: dps := record.metric.Gauge().DataPoints() nextLines = make([]string, 0, dps.Len()) for i := 0; i < dps.Len(); i++ { nextLines = append(nextLines, gf.numberRecord(fs, name, dps.At(i))) } - case pdata.MetricDataTypeSum: + case pmetric.MetricDataTypeSum: dps := record.metric.Sum().DataPoints() nextLines = make([]string, 0, dps.Len()) for i := 0; i < dps.Len(); i++ { nextLines = append(nextLines, gf.numberRecord(fs, name, dps.At(i))) } // Skip complex metrics - case pdata.MetricDataTypeHistogram: - case pdata.MetricDataTypeSummary: + case pmetric.MetricDataTypeHistogram: + case pmetric.MetricDataTypeSummary: } return strings.Join(nextLines, "\n") diff --git a/exporter/sumologicexporter/prometheus_formatter.go b/exporter/sumologicexporter/prometheus_formatter.go index 0d6f2b156044..129b720a0da2 100644 --- a/exporter/sumologicexporter/prometheus_formatter.go +++ b/exporter/sumologicexporter/prometheus_formatter.go @@ -20,12 +20,13 @@ import ( "strings" "time" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" ) type dataPoint interface { - Timestamp() pdata.Timestamp - Attributes() pdata.Map + Timestamp() pcommon.Timestamp + Attributes() pcommon.Map } type prometheusFormatter struct { @@ -54,10 +55,10 @@ func newPrometheusFormatter() (prometheusFormatter, error) { } // PrometheusLabels returns all attributes as sanitized prometheus labels string -func (f *prometheusFormatter) tags2String(attr pdata.Map, labels pdata.Map) prometheusTags { - mergedAttributes := pdata.NewMap() +func (f *prometheusFormatter) tags2String(attr pcommon.Map, labels pcommon.Map) prometheusTags { + mergedAttributes := pcommon.NewMap() attr.CopyTo(mergedAttributes) - labels.Range(func(k string, v pdata.Value) bool { + labels.Range(func(k string, v pcommon.Value) bool { mergedAttributes.UpsertString(k, v.StringVal()) return true }) @@ -68,7 +69,7 @@ func (f *prometheusFormatter) tags2String(attr pdata.Map, labels pdata.Map) prom } returnValue := make([]string, 0, length) - mergedAttributes.Range(func(k string, v pdata.Value) bool { + mergedAttributes.Range(func(k string, v pcommon.Value) bool { returnValue = append( returnValue, fmt.Sprintf( @@ -98,40 +99,40 @@ func (f *prometheusFormatter) sanitizeValue(s string) string { } // doubleLine builds metric based on the given arguments where value is float64 -func (f *prometheusFormatter) doubleLine(name string, attributes prometheusTags, value float64, timestamp pdata.Timestamp) string { +func (f *prometheusFormatter) doubleLine(name string, attributes prometheusTags, value float64, timestamp pcommon.Timestamp) string { return fmt.Sprintf( "%s%s %g %d", f.sanitizeKey(name), attributes, value, - timestamp/pdata.Timestamp(time.Millisecond), + timestamp/pcommon.Timestamp(time.Millisecond), ) } // intLine builds metric based on the given arguments where value is int64 -func (f *prometheusFormatter) intLine(name string, attributes prometheusTags, value 
int64, timestamp pdata.Timestamp) string { +func (f *prometheusFormatter) intLine(name string, attributes prometheusTags, value int64, timestamp pcommon.Timestamp) string { return fmt.Sprintf( "%s%s %d %d", f.sanitizeKey(name), attributes, value, - timestamp/pdata.Timestamp(time.Millisecond), + timestamp/pcommon.Timestamp(time.Millisecond), ) } // uintLine builds metric based on the given arguments where value is uint64 -func (f *prometheusFormatter) uintLine(name string, attributes prometheusTags, value uint64, timestamp pdata.Timestamp) string { +func (f *prometheusFormatter) uintLine(name string, attributes prometheusTags, value uint64, timestamp pcommon.Timestamp) string { return fmt.Sprintf( "%s%s %d %d", f.sanitizeKey(name), attributes, value, - timestamp/pdata.Timestamp(time.Millisecond), + timestamp/pcommon.Timestamp(time.Millisecond), ) } // doubleValueLine returns prometheus line with given value -func (f *prometheusFormatter) doubleValueLine(name string, value float64, dp dataPoint, attributes pdata.Map) string { +func (f *prometheusFormatter) doubleValueLine(name string, value float64, dp dataPoint, attributes pcommon.Map) string { return f.doubleLine( name, f.tags2String(attributes, dp.Attributes()), @@ -141,7 +142,7 @@ func (f *prometheusFormatter) doubleValueLine(name string, value float64, dp dat } // uintValueLine returns prometheus line with given value -func (f *prometheusFormatter) uintValueLine(name string, value uint64, dp dataPoint, attributes pdata.Map) string { +func (f *prometheusFormatter) uintValueLine(name string, value uint64, dp dataPoint, attributes pcommon.Map) string { return f.uintLine( name, f.tags2String(attributes, dp.Attributes()), @@ -150,17 +151,17 @@ func (f *prometheusFormatter) uintValueLine(name string, value uint64, dp dataPo ) } -// numberDataPointValueLine returns prometheus line with value from pdata.NumberDataPoint -func (f *prometheusFormatter) numberDataPointValueLine(name string, dp pdata.NumberDataPoint, attributes pdata.Map) string { +// numberDataPointValueLine returns prometheus line with value from pmetric.NumberDataPoint +func (f *prometheusFormatter) numberDataPointValueLine(name string, dp pmetric.NumberDataPoint, attributes pcommon.Map) string { switch dp.ValueType() { - case pdata.MetricValueTypeDouble: + case pmetric.MetricValueTypeDouble: return f.doubleValueLine( name, dp.DoubleVal(), dp, attributes, ) - case pdata.MetricValueTypeInt: + case pmetric.MetricValueTypeInt: return f.intLine( name, f.tags2String(attributes, dp.Attributes()), @@ -181,11 +182,11 @@ func (f *prometheusFormatter) countMetric(name string) string { return fmt.Sprintf("%s_count", name) } -// mergeAttributes gets two pdata.Map and returns new which contains values from both of them -func (f *prometheusFormatter) mergeAttributes(attributes pdata.Map, additionalAttributes pdata.Map) pdata.Map { - mergedAttributes := pdata.NewMap() +// mergeAttributes gets two pcommon.Map and returns new which contains values from both of them +func (f *prometheusFormatter) mergeAttributes(attributes pcommon.Map, additionalAttributes pcommon.Map) pcommon.Map { + mergedAttributes := pcommon.NewMap() attributes.CopyTo(mergedAttributes) - additionalAttributes.Range(func(k string, v pdata.Value) bool { + additionalAttributes.Range(func(k string, v pcommon.Value) bool { mergedAttributes.Upsert(k, v) return true }) @@ -237,7 +238,7 @@ func (f *prometheusFormatter) summary2Strings(record metricPair) []string { for i := 0; i < dps.Len(); i++ { dp := dps.At(i) qs := 
dp.QuantileValues() - additionalAttributes := pdata.NewMap() + additionalAttributes := pcommon.NewMap() for i := 0; i < qs.Len(); i++ { q := qs.At(i) additionalAttributes.UpsertDouble(prometheusQuantileTag, q.Quantile()) @@ -285,7 +286,7 @@ func (f *prometheusFormatter) histogram2Strings(record metricPair) []string { } var cumulative uint64 - additionalAttributes := pdata.NewMap() + additionalAttributes := pcommon.NewMap() for i, bound := range explicitBounds { cumulative += dp.BucketCounts()[i] @@ -335,13 +336,13 @@ func (f *prometheusFormatter) metric2String(record metricPair) string { var lines []string switch record.metric.DataType() { - case pdata.MetricDataTypeGauge: + case pmetric.MetricDataTypeGauge: lines = f.gauge2Strings(record) - case pdata.MetricDataTypeSum: + case pmetric.MetricDataTypeSum: lines = f.sum2Strings(record) - case pdata.MetricDataTypeSummary: + case pmetric.MetricDataTypeSummary: lines = f.summary2Strings(record) - case pdata.MetricDataTypeHistogram: + case pmetric.MetricDataTypeHistogram: lines = f.histogram2Strings(record) } return strings.Join(lines, "\n") diff --git a/exporter/sumologicexporter/prometheus_formatter_test.go b/exporter/sumologicexporter/prometheus_formatter_test.go index cfa8b270b7bd..3bbdfef6b761 100644 --- a/exporter/sumologicexporter/prometheus_formatter_test.go +++ b/exporter/sumologicexporter/prometheus_formatter_test.go @@ -19,7 +19,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" ) func TestSanitizeKey(t *testing.T) { @@ -46,7 +46,7 @@ func TestTags2StringNoLabels(t *testing.T) { mp := exampleIntMetric() mp.attributes.Clear() - assert.Equal(t, prometheusTags(""), f.tags2String(mp.attributes, pdata.NewMap())) + assert.Equal(t, prometheusTags(""), f.tags2String(mp.attributes, pcommon.NewMap())) } func TestTags2String(t *testing.T) { @@ -57,7 +57,7 @@ func TestTags2String(t *testing.T) { assert.Equal( t, prometheusTags(`{test="test_value",test2="second_value"}`), - f.tags2String(mp.attributes, pdata.NewMap()), + f.tags2String(mp.attributes, pcommon.NewMap()), ) } @@ -67,7 +67,7 @@ func TestTags2StringNoAttributes(t *testing.T) { mp := exampleIntMetric() mp.attributes.Clear() - assert.Equal(t, prometheusTags(""), f.tags2String(pdata.NewMap(), pdata.NewMap())) + assert.Equal(t, prometheusTags(""), f.tags2String(pcommon.NewMap(), pcommon.NewMap())) } func TestPrometheusMetricDataTypeIntGauge(t *testing.T) { diff --git a/exporter/sumologicexporter/sender.go b/exporter/sumologicexporter/sender.go index 0aea24d48315..8ef9b0a33542 100644 --- a/exporter/sumologicexporter/sender.go +++ b/exporter/sumologicexporter/sender.go @@ -24,7 +24,9 @@ import ( "net/http" "strings" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/multierr" ) @@ -37,12 +39,12 @@ type appendResponse struct { // metricPair represents information required to send one metric to the Sumo Logic type metricPair struct { - attributes pdata.Map - metric pdata.Metric + attributes pcommon.Map + metric pmetric.Metric } type sender struct { - logBuffer []pdata.LogRecord + logBuffer []plog.LogRecord metricBuffer []metricPair config *Config client *http.Client @@ -55,7 +57,7 @@ type sender struct { const ( logKey string = "log" - // maxBufferSize defines size of the logBuffer (maximum number of 
pdata.LogRecord entries) + // maxBufferSize defines size of the logBuffer (maximum number of plog.LogRecord entries) maxBufferSize int = 1024 * 1024 headerContentType string = "Content-Type" @@ -167,12 +169,12 @@ func (s *sender) send(ctx context.Context, pipeline PipelineType, body io.Reader } // logToText converts LogRecord to a plain text line, returns it and error eventually -func (s *sender) logToText(record pdata.LogRecord) string { +func (s *sender) logToText(record plog.LogRecord) string { return record.Body().AsString() } // logToJSON converts LogRecord to a json line, returns it and error eventually -func (s *sender) logToJSON(record pdata.LogRecord) (string, error) { +func (s *sender) logToJSON(record plog.LogRecord) (string, error) { data := s.filter.filterOut(record.Attributes()) data.orig.Upsert(logKey, record.Body()) @@ -187,12 +189,12 @@ func (s *sender) logToJSON(record pdata.LogRecord) (string, error) { // sendLogs sends log records from the logBuffer formatted according // to configured LogFormat and as the result of execution // returns array of records which has not been sent correctly and error -func (s *sender) sendLogs(ctx context.Context, flds fields) ([]pdata.LogRecord, error) { +func (s *sender) sendLogs(ctx context.Context, flds fields) ([]plog.LogRecord, error) { var ( body strings.Builder errs error - droppedRecords []pdata.LogRecord - currentRecords []pdata.LogRecord + droppedRecords []plog.LogRecord + currentRecords []plog.LogRecord ) for _, record := range s.logBuffer { @@ -355,7 +357,7 @@ func (s *sender) cleanLogsBuffer() { // batchLog adds log to the logBuffer and flushes them if logBuffer is full to avoid overflow // returns list of log records which were not sent successfully -func (s *sender) batchLog(ctx context.Context, log pdata.LogRecord, metadata fields) ([]pdata.LogRecord, error) { +func (s *sender) batchLog(ctx context.Context, log plog.LogRecord, metadata fields) ([]plog.LogRecord, error) { s.logBuffer = append(s.logBuffer, log) if s.countLogs() >= maxBufferSize { diff --git a/exporter/sumologicexporter/sender_test.go b/exporter/sumologicexporter/sender_test.go index cdd346da6c15..4af63d9527ed 100644 --- a/exporter/sumologicexporter/sender_test.go +++ b/exporter/sumologicexporter/sender_test.go @@ -28,7 +28,8 @@ import ( "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/config/confighttp" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" ) type senderTest struct { @@ -107,21 +108,21 @@ func extractBody(t *testing.T, req *http.Request) string { return buf.String() } -func exampleLog() []pdata.LogRecord { - buffer := make([]pdata.LogRecord, 1) - buffer[0] = pdata.NewLogRecord() +func exampleLog() []plog.LogRecord { + buffer := make([]plog.LogRecord, 1) + buffer[0] = plog.NewLogRecord() buffer[0].Body().SetStringVal("Example log") return buffer } -func exampleTwoLogs() []pdata.LogRecord { - buffer := make([]pdata.LogRecord, 2) - buffer[0] = pdata.NewLogRecord() +func exampleTwoLogs() []plog.LogRecord { + buffer := make([]plog.LogRecord, 2) + buffer[0] = plog.NewLogRecord() buffer[0].Body().SetStringVal("Example log") buffer[0].Attributes().InsertString("key1", "value1") buffer[0].Attributes().InsertString("key2", "value2") - buffer[1] = pdata.NewLogRecord() + buffer[1] = plog.NewLogRecord() buffer[1].Body().SetStringVal("Another example log") 
buffer[1].Attributes().InsertString("key1", "value1") buffer[1].Attributes().InsertString("key2", "value2") @@ -129,13 +130,13 @@ func exampleTwoLogs() []pdata.LogRecord { return buffer } -func exampleTwoDifferentLogs() []pdata.LogRecord { - buffer := make([]pdata.LogRecord, 2) - buffer[0] = pdata.NewLogRecord() +func exampleTwoDifferentLogs() []plog.LogRecord { + buffer := make([]plog.LogRecord, 2) + buffer[0] = plog.NewLogRecord() buffer[0].Body().SetStringVal("Example log") buffer[0].Attributes().InsertString("key1", "value1") buffer[0].Attributes().InsertString("key2", "value2") - buffer[1] = pdata.NewLogRecord() + buffer[1] = plog.NewLogRecord() buffer[1].Body().SetStringVal("Another example log") buffer[1].Attributes().InsertString("key3", "value3") buffer[1].Attributes().InsertString("key4", "value4") @@ -143,27 +144,27 @@ func exampleTwoDifferentLogs() []pdata.LogRecord { return buffer } -func exampleMultitypeLogs() []pdata.LogRecord { - buffer := make([]pdata.LogRecord, 2) +func exampleMultitypeLogs() []plog.LogRecord { + buffer := make([]plog.LogRecord, 2) - attVal := pdata.NewValueMap() + attVal := pcommon.NewValueMap() attMap := attVal.MapVal() attMap.InsertString("lk1", "lv1") attMap.InsertInt("lk2", 13) - buffer[0] = pdata.NewLogRecord() + buffer[0] = plog.NewLogRecord() attVal.CopyTo(buffer[0].Body()) buffer[0].Attributes().InsertString("key1", "value1") buffer[0].Attributes().InsertString("key2", "value2") - buffer[1] = pdata.NewLogRecord() + buffer[1] = plog.NewLogRecord() - attVal = pdata.NewValueSlice() + attVal = pcommon.NewValueSlice() attArr := attVal.SliceVal() - strVal := pdata.NewValueEmpty() + strVal := pcommon.NewValueEmpty() strVal.SetStringVal("lv2") - intVal := pdata.NewValueEmpty() + intVal := pcommon.NewValueEmpty() intVal.SetIntVal(13) strTgt := attArr.AppendEmpty() @@ -231,7 +232,7 @@ func TestSendLogsSplit(t *testing.T) { test.s.config.MaxRequestBodySize = 10 test.s.logBuffer = exampleTwoLogs() - _, err := test.s.sendLogs(context.Background(), newFields(pdata.NewMap())) + _, err := test.s.sendLogs(context.Background(), newFields(pcommon.NewMap())) assert.NoError(t, err) } func TestSendLogsSplitFailedOne(t *testing.T) { @@ -252,7 +253,7 @@ func TestSendLogsSplitFailedOne(t *testing.T) { test.s.config.LogFormat = TextFormat test.s.logBuffer = exampleTwoLogs() - dropped, err := test.s.sendLogs(context.Background(), newFields(pdata.NewMap())) + dropped, err := test.s.sendLogs(context.Background(), newFields(pcommon.NewMap())) assert.EqualError(t, err, "error during sending data: 500 Internal Server Error") assert.Equal(t, test.s.logBuffer[0:1], dropped) } @@ -277,7 +278,7 @@ func TestSendLogsSplitFailedAll(t *testing.T) { test.s.config.LogFormat = TextFormat test.s.logBuffer = exampleTwoLogs() - dropped, err := test.s.sendLogs(context.Background(), newFields(pdata.NewMap())) + dropped, err := test.s.sendLogs(context.Background(), newFields(pcommon.NewMap())) assert.EqualError( t, err, @@ -342,7 +343,7 @@ func TestSendLogsJsonSplit(t *testing.T) { test.s.config.MaxRequestBodySize = 10 test.s.logBuffer = exampleTwoLogs() - _, err := test.s.sendLogs(context.Background(), newFields(pdata.NewMap())) + _, err := test.s.sendLogs(context.Background(), newFields(pcommon.NewMap())) assert.NoError(t, err) } @@ -364,7 +365,7 @@ func TestSendLogsJsonSplitFailedOne(t *testing.T) { test.s.config.MaxRequestBodySize = 10 test.s.logBuffer = exampleTwoLogs() - dropped, err := test.s.sendLogs(context.Background(), newFields(pdata.NewMap())) + dropped, err := 
test.s.sendLogs(context.Background(), newFields(pcommon.NewMap())) assert.EqualError(t, err, "error during sending data: 500 Internal Server Error") assert.Equal(t, test.s.logBuffer[0:1], dropped) } @@ -389,7 +390,7 @@ func TestSendLogsJsonSplitFailedAll(t *testing.T) { test.s.config.MaxRequestBodySize = 10 test.s.logBuffer = exampleTwoLogs() - dropped, err := test.s.sendLogs(context.Background(), newFields(pdata.NewMap())) + dropped, err := test.s.sendLogs(context.Background(), newFields(pcommon.NewMap())) assert.EqualError( t, err, @@ -408,7 +409,7 @@ func TestSendLogsUnexpectedFormat(t *testing.T) { logs := exampleTwoLogs() test.s.logBuffer = logs - dropped, err := test.s.sendLogs(context.Background(), newFields(pdata.NewMap())) + dropped, err := test.s.sendLogs(context.Background(), newFields(pcommon.NewMap())) assert.Error(t, err) assert.Equal(t, logs, dropped) } @@ -465,13 +466,13 @@ func TestLogsBuffer(t *testing.T) { assert.Equal(t, test.s.countLogs(), 0) logs := exampleTwoLogs() - droppedLogs, err := test.s.batchLog(context.Background(), logs[0], newFields(pdata.NewMap())) + droppedLogs, err := test.s.batchLog(context.Background(), logs[0], newFields(pcommon.NewMap())) require.NoError(t, err) assert.Nil(t, droppedLogs) assert.Equal(t, 1, test.s.countLogs()) - assert.Equal(t, []pdata.LogRecord{logs[0]}, test.s.logBuffer) + assert.Equal(t, []plog.LogRecord{logs[0]}, test.s.logBuffer) - droppedLogs, err = test.s.batchLog(context.Background(), logs[1], newFields(pdata.NewMap())) + droppedLogs, err = test.s.batchLog(context.Background(), logs[1], newFields(pcommon.NewMap())) require.NoError(t, err) assert.Nil(t, droppedLogs) assert.Equal(t, 2, test.s.countLogs()) @@ -479,7 +480,7 @@ func TestLogsBuffer(t *testing.T) { test.s.cleanLogsBuffer() assert.Equal(t, 0, test.s.countLogs()) - assert.Equal(t, []pdata.LogRecord{}, test.s.logBuffer) + assert.Equal(t, []plog.LogRecord{}, test.s.logBuffer) } func TestInvalidEndpoint(t *testing.T) { @@ -489,7 +490,7 @@ func TestInvalidEndpoint(t *testing.T) { test.s.config.HTTPClientSettings.Endpoint = ":" test.s.logBuffer = exampleLog() - _, err := test.s.sendLogs(context.Background(), newFields(pdata.NewMap())) + _, err := test.s.sendLogs(context.Background(), newFields(pcommon.NewMap())) assert.EqualError(t, err, `parse ":": missing protocol scheme`) } @@ -500,7 +501,7 @@ func TestInvalidPostRequest(t *testing.T) { test.s.config.HTTPClientSettings.Endpoint = "" test.s.logBuffer = exampleLog() - _, err := test.s.sendLogs(context.Background(), newFields(pdata.NewMap())) + _, err := test.s.sendLogs(context.Background(), newFields(pcommon.NewMap())) assert.EqualError(t, err, `Post "": unsupported protocol scheme ""`) } @@ -510,7 +511,7 @@ func TestLogsBufferOverflow(t *testing.T) { test.s.config.HTTPClientSettings.Endpoint = ":" log := exampleLog() - flds := newFields(pdata.NewMap()) + flds := newFields(pcommon.NewMap()) for test.s.countLogs() < maxBufferSize-1 { _, err := test.s.batchLog(context.Background(), log[0], flds) @@ -528,7 +529,7 @@ func TestInvalidMetricFormat(t *testing.T) { test.s.config.MetricFormat = "invalid" - err := test.s.send(context.Background(), MetricsPipeline, strings.NewReader(""), newFields(pdata.NewMap())) + err := test.s.send(context.Background(), MetricsPipeline, strings.NewReader(""), newFields(pcommon.NewMap())) assert.EqualError(t, err, `unsupported metrics format: invalid`) } @@ -536,7 +537,7 @@ func TestInvalidPipeline(t *testing.T) { test := prepareSenderTest(t, []func(w http.ResponseWriter, req *http.Request){}) 
defer func() { test.srv.Close() }() - err := test.s.send(context.Background(), "invalidPipeline", strings.NewReader(""), newFields(pdata.NewMap())) + err := test.s.send(context.Background(), "invalidPipeline", strings.NewReader(""), newFields(pcommon.NewMap())) assert.EqualError(t, err, `unexpected pipeline`) } @@ -560,7 +561,7 @@ func TestSendCompressGzip(t *testing.T) { test.s.compressor = c reader := strings.NewReader("Some example log") - err = test.s.send(context.Background(), LogsPipeline, reader, newFields(pdata.NewMap())) + err = test.s.send(context.Background(), LogsPipeline, reader, newFields(pcommon.NewMap())) require.NoError(t, err) } @@ -584,7 +585,7 @@ func TestSendCompressDeflate(t *testing.T) { test.s.compressor = c reader := strings.NewReader("Some example log") - err = test.s.send(context.Background(), LogsPipeline, reader, newFields(pdata.NewMap())) + err = test.s.send(context.Background(), LogsPipeline, reader, newFields(pcommon.NewMap())) require.NoError(t, err) } @@ -595,7 +596,7 @@ func TestCompressionError(t *testing.T) { test.s.compressor = getTestCompressor(errors.New("read error"), nil) reader := strings.NewReader("Some example log") - err := test.s.send(context.Background(), LogsPipeline, reader, newFields(pdata.NewMap())) + err := test.s.send(context.Background(), LogsPipeline, reader, newFields(pcommon.NewMap())) assert.EqualError(t, err, "read error") } @@ -606,7 +607,7 @@ func TestInvalidContentEncoding(t *testing.T) { test.s.config.CompressEncoding = "test" reader := strings.NewReader("Some example log") - err := test.s.send(context.Background(), LogsPipeline, reader, newFields(pdata.NewMap())) + err := test.s.send(context.Background(), LogsPipeline, reader, newFields(pcommon.NewMap())) assert.EqualError(t, err, "invalid content encoding: test") } @@ -659,7 +660,7 @@ gauge_metric_name{foo="bar",remote_name="156955",url="http://another_url"} 245 1 exampleIntGaugeMetric(), } - _, err := test.s.sendMetrics(context.Background(), newFields(pdata.NewMap())) + _, err := test.s.sendMetrics(context.Background(), newFields(pcommon.NewMap())) assert.NoError(t, err) } @@ -687,7 +688,7 @@ gauge_metric_name{foo="bar",remote_name="156955",url="http://another_url"} 245 1 exampleIntGaugeMetric(), } - dropped, err := test.s.sendMetrics(context.Background(), newFields(pdata.NewMap())) + dropped, err := test.s.sendMetrics(context.Background(), newFields(pcommon.NewMap())) assert.EqualError(t, err, "error during sending data: 500 Internal Server Error") assert.Equal(t, test.s.metricBuffer[0:1], dropped) } @@ -718,7 +719,7 @@ gauge_metric_name{foo="bar",remote_name="156955",url="http://another_url"} 245 1 exampleIntGaugeMetric(), } - dropped, err := test.s.sendMetrics(context.Background(), newFields(pdata.NewMap())) + dropped, err := test.s.sendMetrics(context.Background(), newFields(pcommon.NewMap())) assert.EqualError( t, err, @@ -739,7 +740,7 @@ func TestSendMetricsUnexpectedFormat(t *testing.T) { } test.s.metricBuffer = metrics - dropped, err := test.s.sendMetrics(context.Background(), newFields(pdata.NewMap())) + dropped, err := test.s.sendMetrics(context.Background(), newFields(pcommon.NewMap())) assert.EqualError(t, err, "unexpected metric format: invalid") assert.Equal(t, dropped, metrics) } @@ -754,13 +755,13 @@ func TestMetricsBuffer(t *testing.T) { exampleIntGaugeMetric(), } - droppedMetrics, err := test.s.batchMetric(context.Background(), metrics[0], newFields(pdata.NewMap())) + droppedMetrics, err := test.s.batchMetric(context.Background(), metrics[0], 
newFields(pcommon.NewMap())) require.NoError(t, err) assert.Nil(t, droppedMetrics) assert.Equal(t, 1, test.s.countMetrics()) assert.Equal(t, metrics[0:1], test.s.metricBuffer) - droppedMetrics, err = test.s.batchMetric(context.Background(), metrics[1], newFields(pdata.NewMap())) + droppedMetrics, err = test.s.batchMetric(context.Background(), metrics[1], newFields(pcommon.NewMap())) require.NoError(t, err) assert.Nil(t, droppedMetrics) assert.Equal(t, 2, test.s.countMetrics()) @@ -780,7 +781,7 @@ func TestMetricsBufferOverflow(t *testing.T) { test.s.config.MetricFormat = PrometheusFormat test.s.config.MaxRequestBodySize = 1024 * 1024 * 1024 * 1024 metric := exampleIntMetric() - flds := newFields(pdata.NewMap()) + flds := newFields(pcommon.NewMap()) for test.s.countMetrics() < maxBufferSize-1 { _, err := test.s.batchMetric(context.Background(), metric, flds) diff --git a/exporter/sumologicexporter/test_data.go b/exporter/sumologicexporter/test_data.go index a254a80bf721..5b1dc3e69fea 100644 --- a/exporter/sumologicexporter/test_data.go +++ b/exporter/sumologicexporter/test_data.go @@ -15,7 +15,8 @@ package sumologicexporter // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sumologicexporter" import ( - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" ) func exampleIntMetric() metricPair { @@ -23,10 +24,10 @@ func exampleIntMetric() metricPair { } func buildExampleIntMetric(fillData bool) metricPair { - metric := pdata.NewMetric() + metric := pmetric.NewMetric() metric.SetName("test.metric.data") metric.SetUnit("bytes") - metric.SetDataType(pdata.MetricDataTypeSum) + metric.SetDataType(pmetric.MetricDataTypeSum) if fillData { dp := metric.Sum().DataPoints().AppendEmpty() @@ -34,7 +35,7 @@ func buildExampleIntMetric(fillData bool) metricPair { dp.SetIntVal(14500) } - attributes := pdata.NewMap() + attributes := pcommon.NewMap() attributes.InsertString("test", "test_value") attributes.InsertString("test2", "second_value") @@ -50,11 +51,11 @@ func exampleIntGaugeMetric() metricPair { func buildExampleIntGaugeMetric(fillData bool) metricPair { metric := metricPair{ - attributes: pdata.NewMap(), - metric: pdata.NewMetric(), + attributes: pcommon.NewMap(), + metric: pmetric.NewMetric(), } - metric.metric.SetDataType(pdata.MetricDataTypeGauge) + metric.metric.SetDataType(pmetric.MetricDataTypeGauge) metric.metric.SetName("gauge_metric_name") metric.attributes.InsertString("foo", "bar") @@ -82,11 +83,11 @@ func exampleDoubleGaugeMetric() metricPair { func buildExampleDoubleGaugeMetric(fillData bool) metricPair { metric := metricPair{ - attributes: pdata.NewMap(), - metric: pdata.NewMetric(), + attributes: pcommon.NewMap(), + metric: pmetric.NewMetric(), } - metric.metric.SetDataType(pdata.MetricDataTypeGauge) + metric.metric.SetDataType(pmetric.MetricDataTypeGauge) metric.metric.SetName("gauge_metric_name_double_test") metric.attributes.InsertString("foo", "bar") @@ -114,11 +115,11 @@ func exampleIntSumMetric() metricPair { func buildExampleIntSumMetric(fillData bool) metricPair { metric := metricPair{ - attributes: pdata.NewMap(), - metric: pdata.NewMetric(), + attributes: pcommon.NewMap(), + metric: pmetric.NewMetric(), } - metric.metric.SetDataType(pdata.MetricDataTypeSum) + metric.metric.SetDataType(pmetric.MetricDataTypeSum) metric.metric.SetName("sum_metric_int_test") metric.attributes.InsertString("foo", "bar") @@ -146,11 +147,11 @@ func exampleDoubleSumMetric() 
metricPair { func buildExampleDoubleSumMetric(fillData bool) metricPair { metric := metricPair{ - attributes: pdata.NewMap(), - metric: pdata.NewMetric(), + attributes: pcommon.NewMap(), + metric: pmetric.NewMetric(), } - metric.metric.SetDataType(pdata.MetricDataTypeSum) + metric.metric.SetDataType(pmetric.MetricDataTypeSum) metric.metric.SetName("sum_metric_double_test") metric.attributes.InsertString("foo", "bar") @@ -178,11 +179,11 @@ func exampleSummaryMetric() metricPair { func buildExampleSummaryMetric(fillData bool) metricPair { metric := metricPair{ - attributes: pdata.NewMap(), - metric: pdata.NewMetric(), + attributes: pcommon.NewMap(), + metric: pmetric.NewMetric(), } - metric.metric.SetDataType(pdata.MetricDataTypeSummary) + metric.metric.SetDataType(pmetric.MetricDataTypeSummary) metric.metric.SetName("summary_metric_double_test") metric.attributes.InsertString("foo", "bar") @@ -220,11 +221,11 @@ func exampleHistogramMetric() metricPair { func buildExampleHistogramMetric(fillData bool) metricPair { metric := metricPair{ - attributes: pdata.NewMap(), - metric: pdata.NewMetric(), + attributes: pcommon.NewMap(), + metric: pmetric.NewMetric(), } - metric.metric.SetDataType(pdata.MetricDataTypeHistogram) + metric.metric.SetDataType(pmetric.MetricDataTypeHistogram) metric.metric.SetName("histogram_metric_double_test") metric.attributes.InsertString("bar", "foo") @@ -255,8 +256,8 @@ func buildExampleHistogramMetric(fillData bool) metricPair { return metric } -func metricPairToMetrics(mp []metricPair) pdata.Metrics { - metrics := pdata.NewMetrics() +func metricPairToMetrics(mp []metricPair) pmetric.Metrics { + metrics := pmetric.NewMetrics() metrics.ResourceMetrics().EnsureCapacity(len(mp)) for num, record := range mp { record.attributes.CopyTo(metrics.ResourceMetrics().AppendEmpty().Resource().Attributes()) @@ -268,7 +269,7 @@ func metricPairToMetrics(mp []metricPair) pdata.Metrics { } func fieldsFromMap(s map[string]string) fields { - attrMap := pdata.NewMap() + attrMap := pcommon.NewMap() for k, v := range s { attrMap.InsertString(k, v) } diff --git a/exporter/tanzuobservabilityexporter/exporter.go b/exporter/tanzuobservabilityexporter/exporter.go index 993399b21987..e02723bf1e6a 100644 --- a/exporter/tanzuobservabilityexporter/exporter.go +++ b/exporter/tanzuobservabilityexporter/exporter.go @@ -25,7 +25,7 @@ import ( "github.com/wavefronthq/wavefront-sdk-go/senders" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/multierr" "go.uber.org/zap" ) @@ -99,7 +99,7 @@ func newTracesExporter(settings component.ExporterCreateSettings, c config.Expor }, nil } -func (e *tracesExporter) pushTraceData(ctx context.Context, td pdata.Traces) error { +func (e *tracesExporter) pushTraceData(ctx context.Context, td ptrace.Traces) error { var errs error for i := 0; i < td.ResourceSpans().Len(); i++ { diff --git a/exporter/tanzuobservabilityexporter/exporter_test.go b/exporter/tanzuobservabilityexporter/exporter_test.go index e8858c428f29..b5862c69e18b 100644 --- a/exporter/tanzuobservabilityexporter/exporter_test.go +++ b/exporter/tanzuobservabilityexporter/exporter_test.go @@ -25,17 +25,18 @@ import ( "github.com/wavefronthq/wavefront-sdk-go/senders" "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/exporter/exporterhelper" - "go.opentelemetry.io/collector/model/pdata" conventions 
"go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap" ) func TestSpansRequireTraceAndSpanIDs(t *testing.T) { - spanWithNoTraceID := pdata.NewSpan() - spanWithNoTraceID.SetSpanID(pdata.NewSpanID([8]byte{9, 9, 9, 9, 9, 9, 9, 9})) - spanWithNoSpanID := pdata.NewSpan() - spanWithNoSpanID.SetTraceID(pdata.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) - traces := constructTraces([]pdata.Span{spanWithNoTraceID, spanWithNoSpanID}) + spanWithNoTraceID := ptrace.NewSpan() + spanWithNoTraceID.SetSpanID(pcommon.NewSpanID([8]byte{9, 9, 9, 9, 9, 9, 9, 9})) + spanWithNoSpanID := ptrace.NewSpan() + spanWithNoSpanID.SetTraceID(pcommon.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) + traces := constructTraces([]ptrace.Span{spanWithNoTraceID, spanWithNoSpanID}) _, err := consumeTraces(traces) require.Error(t, err) @@ -48,11 +49,11 @@ func TestExportTraceDataMinimum(t *testing.T) { // getAllUsers source=localhost traceId=7b3bf470-9456-11e8-9eb6-529269fb1459 spanId=0313bafe-9457-11e8-9eb6-529269fb1459 parent=2f64e538-9457-11e8-9eb6-529269fb1459 application=Wavefront service=auth cluster=us-west-2 shard=secondary http.method=GET 1552949776000 343 minSpan := createSpan( "root", - pdata.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}), - pdata.NewSpanID([8]byte{9, 9, 9, 9, 9, 9, 9, 9}), - pdata.SpanID{}, + pcommon.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}), + pcommon.NewSpanID([8]byte{9, 9, 9, 9, 9, 9, 9, 9}), + pcommon.SpanID{}, ) - traces := constructTraces([]pdata.Span{minSpan}) + traces := constructTraces([]ptrace.Span{minSpan}) expected := []*span{{ Name: "root", @@ -68,53 +69,53 @@ func TestExportTraceDataMinimum(t *testing.T) { } func TestExportTraceDataFullTrace(t *testing.T) { - traceID := pdata.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}) + traceID := pcommon.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}) rootSpan := createSpan( "root", traceID, - pdata.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 1}), - pdata.SpanID{}, + pcommon.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 1}), + pcommon.SpanID{}, ) clientSpan := createSpan( "client", traceID, - pdata.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 2}), + pcommon.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 2}), rootSpan.SpanID(), ) - clientSpan.SetKind(pdata.SpanKindClient) - event := pdata.NewSpanEvent() + clientSpan.SetKind(ptrace.SpanKindClient) + event := ptrace.NewSpanEvent() event.SetName("client-event") event.CopyTo(clientSpan.Events().AppendEmpty()) - status := pdata.NewSpanStatus() - status.SetCode(pdata.StatusCodeError) + status := ptrace.NewSpanStatus() + status.SetCode(ptrace.StatusCodeError) status.SetMessage("an error event occurred") status.CopyTo(clientSpan.Status()) - clientAttrs := pdata.NewMap() + clientAttrs := pcommon.NewMap() clientAttrs.InsertString(labelApplication, "test-app") clientAttrs.CopyTo(clientSpan.Attributes()) serverSpan := createSpan( "server", traceID, - pdata.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 3}), + pcommon.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 3}), clientSpan.SpanID(), ) - serverSpan.SetKind(pdata.SpanKindServer) + serverSpan.SetKind(ptrace.SpanKindServer) serverSpan.SetTraceState("key=val") - serverAttrs := pdata.NewMap() + serverAttrs := pcommon.NewMap() serverAttrs.InsertString(conventions.AttributeServiceName, "the-server") serverAttrs.InsertString(conventions.AttributeHTTPMethod, 
"POST") serverAttrs.InsertInt(conventions.AttributeHTTPStatusCode, 403) serverAttrs.InsertString(labelSource, "test_source") serverAttrs.CopyTo(serverSpan.Attributes()) - traces := constructTraces([]pdata.Span{rootSpan, clientSpan, serverSpan}) - resourceAttrs := pdata.NewMap() + traces := constructTraces([]ptrace.Span{rootSpan, clientSpan, serverSpan}) + resourceAttrs := pcommon.NewMap() resourceAttrs.InsertString("resource", "R1") resourceAttrs.InsertString(conventions.AttributeServiceName, "test-service") resourceAttrs.InsertString(labelSource, "test-source") @@ -171,7 +172,7 @@ func TestExportTraceDataFullTrace(t *testing.T) { validateTraces(t, expected, traces) } -func validateTraces(t *testing.T, expected []*span, traces pdata.Traces) { +func validateTraces(t *testing.T, expected []*span, traces ptrace.Traces) { actual, err := consumeTraces(traces) require.NoError(t, err) require.Equal(t, len(expected), len(actual)) @@ -195,11 +196,11 @@ func validateTraces(t *testing.T, expected []*span, traces pdata.Traces) { func TestExportTraceDataWithInstrumentationDetails(t *testing.T) { minSpan := createSpan( "root", - pdata.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}), - pdata.NewSpanID([8]byte{9, 9, 9, 9, 9, 9, 9, 9}), - pdata.SpanID{}, + pcommon.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}), + pcommon.NewSpanID([8]byte{9, 9, 9, 9, 9, 9, 9, 9}), + pcommon.SpanID{}, ) - traces := constructTraces([]pdata.Span{minSpan}) + traces := constructTraces([]ptrace.Span{minSpan}) instrumentationLibrary := traces.ResourceSpans().At(0).ScopeSpans().At(0). Scope() @@ -222,11 +223,11 @@ func TestExportTraceDataWithInstrumentationDetails(t *testing.T) { } func TestExportTraceDataRespectsContext(t *testing.T) { - traces := constructTraces([]pdata.Span{createSpan( + traces := constructTraces([]ptrace.Span{createSpan( "root", - pdata.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}), - pdata.NewSpanID([8]byte{9, 9, 9, 9, 9, 9, 9, 9}), - pdata.SpanID{}, + pcommon.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}), + pcommon.NewSpanID([8]byte{9, 9, 9, 9, 9, 9, 9, 9}), + pcommon.SpanID{}, )}) sender := &mockSender{} @@ -251,11 +252,11 @@ func TestExportTraceDataRespectsContext(t *testing.T) { func createSpan( name string, - traceID pdata.TraceID, - spanID pdata.SpanID, - parentSpanID pdata.SpanID, -) pdata.Span { - span := pdata.NewSpan() + traceID pcommon.TraceID, + spanID pcommon.SpanID, + parentSpanID pcommon.SpanID, +) ptrace.Span { + span := ptrace.NewSpan() span.SetName(name) span.SetTraceID(traceID) span.SetSpanID(spanID) @@ -263,8 +264,8 @@ func createSpan( return span } -func constructTraces(spans []pdata.Span) pdata.Traces { - traces := pdata.NewTraces() +func constructTraces(spans []ptrace.Span) ptrace.Traces { + traces := ptrace.NewTraces() traces.ResourceSpans().EnsureCapacity(1) rs := traces.ResourceSpans().AppendEmpty() rs.ScopeSpans().EnsureCapacity(1) @@ -276,7 +277,7 @@ func constructTraces(spans []pdata.Span) pdata.Traces { return traces } -func consumeTraces(ptrace pdata.Traces) ([]*span, error) { +func consumeTraces(ptrace ptrace.Traces) ([]*span, error) { ctx := context.Background() sender := &mockSender{} diff --git a/exporter/tanzuobservabilityexporter/go.mod b/exporter/tanzuobservabilityexporter/go.mod index 49b6c74a9158..aac11c107e1d 100644 --- a/exporter/tanzuobservabilityexporter/go.mod +++ b/exporter/tanzuobservabilityexporter/go.mod @@ -8,15 +8,16 @@ require ( 
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.48.0 github.com/stretchr/testify v1.7.1 github.com/wavefronthq/wavefront-sdk-go v0.9.11 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/multierr v1.8.0 go.uber.org/zap v1.21.0 ) require ( github.com/caio/go-tdigest v3.1.0+incompatible // indirect - github.com/cenkalti/backoff/v4 v4.1.2 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/felixge/httpsnoop v1.0.2 // indirect github.com/go-logr/logr v1.2.3 // indirect @@ -25,24 +26,21 @@ require ( github.com/golang/protobuf v1.5.2 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/klauspost/compress v1.15.1 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rogpeppe/go-internal v1.8.0 // indirect github.com/rs/cors v1.8.2 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect - golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect golang.org/x/text v0.3.7 // indirect - google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect google.golang.org/grpc v1.45.0 // indirect google.golang.org/protobuf v1.28.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect @@ -50,3 +48,5 @@ require ( ) replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal => ../../internal/coreinternal + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/exporter/tanzuobservabilityexporter/go.sum b/exporter/tanzuobservabilityexporter/go.sum index 2e2cb2269a5b..f347f1345b8d 100644 --- a/exporter/tanzuobservabilityexporter/go.sum +++ b/exporter/tanzuobservabilityexporter/go.sum @@ -1,7 +1,6 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod 
h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= @@ -20,16 +19,14 @@ github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/caio/go-tdigest v3.1.0+incompatible h1:uoVMJ3Q5lXmVLCCqaMGHLBWnbGoN6Lpu7OAUPR60cds= github.com/caio/go-tdigest v3.1.0+incompatible/go.mod h1:sHQM/ubZStBUmF1WbB8FAm8q9GjDajLC5T7ydxE3JHI= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= -github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= @@ -40,7 +37,6 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= @@ -95,7 +91,6 @@ github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 
h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -127,8 +122,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -176,9 +171,6 @@ github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -196,10 +188,12 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod 
h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0/go.mod h1:PFmBsWbldL1kiWZk9+0LBZz2brhByaGsvp6pRICMlPE= @@ -210,7 +204,7 @@ go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1 go.opentelemetry.io/otel/metric v0.28.0/go.mod h1:TrzsfQAmQaB1PDcdhBauLMk7nyyg9hm+GoQq/ekE9Iw= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.0/go.mod h1:qs7BrU5cZ8dXQHBGxHMOxwME/27YH2qEp4/+tZLLwJE= go.opentelemetry.io/otel/trace v1.6.1/go.mod h1:RkFRM1m0puWIq10oxImnGEduNBzxiN7TXluRBtE+5j0= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= @@ -249,8 +243,8 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -274,13 +268,11 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -307,7 +299,6 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= @@ -317,7 +308,6 @@ google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8 google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -331,7 +321,6 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= diff --git a/exporter/tanzuobservabilityexporter/metrics.go b/exporter/tanzuobservabilityexporter/metrics.go index d3f658c83555..65a3b13bfe90 100644 --- a/exporter/tanzuobservabilityexporter/metrics.go +++ b/exporter/tanzuobservabilityexporter/metrics.go @@ -25,7 +25,8 @@ import ( "github.com/wavefronthq/wavefront-sdk-go/histogram" "github.com/wavefronthq/wavefront-sdk-go/senders" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/multierr" "go.uber.org/zap" ) @@ -58,13 +59,13 @@ var ( // metricsConsumer 
instances consume OTEL metrics type metricsConsumer struct { - consumerMap map[pdata.MetricDataType]typedMetricConsumer + consumerMap map[pmetric.MetricDataType]typedMetricConsumer sender flushCloser reportInternalMetrics bool } type metricInfo struct { - pdata.Metric + pmetric.Metric Source string SourceKey string } @@ -80,7 +81,7 @@ func newMetricsConsumer( sender flushCloser, reportInternalMetrics bool, ) *metricsConsumer { - consumerMap := make(map[pdata.MetricDataType]typedMetricConsumer, len(consumers)) + consumerMap := make(map[pmetric.MetricDataType]typedMetricConsumer, len(consumers)) for _, consumer := range consumers { if consumerMap[consumer.Type()] != nil { panic("duplicate consumer type detected: " + consumer.Type().String()) @@ -98,7 +99,7 @@ func newMetricsConsumer( // typedMetricConsumer that consumes that type of metric. Once Consume consumes // all the metrics, it calls Flush() on the sender passed to // newMetricsConsumer. -func (c *metricsConsumer) Consume(ctx context.Context, md pdata.Metrics) error { +func (c *metricsConsumer) Consume(ctx context.Context, md pmetric.Metrics) error { var errs []error rms := md.ResourceMetrics() for i := 0; i < rms.Len(); i++ { @@ -161,7 +162,7 @@ type typedMetricConsumer interface { // Type returns the type of metric this consumer consumes. For example // Gauge, Sum, or Histogram - Type() pdata.MetricDataType + Type() pmetric.MetricDataType // Consume consumes the metric from the metricInfo and appends any errors encountered to errs Consume(mi metricInfo, errs *[]error) @@ -209,7 +210,7 @@ func (c *counter) Get() int64 { // logMissingValue keeps track of metrics with missing values. metric is the // metric with the missing value. settings logs the missing value. count counts // metrics with missing values. -func logMissingValue(metric pdata.Metric, settings component.TelemetrySettings, count *counter) { +func logMissingValue(metric pmetric.Metric, settings component.TelemetrySettings, count *counter) { namef := zap.String(metricNameString, metric.Name()) typef := zap.String(metricTypeString, metric.DataType().String()) settings.Logger.Debug("Metric missing value", namef, typef) @@ -217,11 +218,11 @@ func logMissingValue(metric pdata.Metric, settings component.TelemetrySettings, } // getValue gets the floating point value out of a NumberDataPoint -func getValue(numberDataPoint pdata.NumberDataPoint) (float64, error) { +func getValue(numberDataPoint pmetric.NumberDataPoint) (float64, error) { switch numberDataPoint.ValueType() { - case pdata.MetricValueTypeInt: + case pmetric.MetricValueTypeInt: return float64(numberDataPoint.IntVal()), nil - case pdata.MetricValueTypeDouble: + case pmetric.MetricValueTypeDouble: return numberDataPoint.DoubleVal(), nil default: return 0.0, errors.New("unsupported metric value type") @@ -235,7 +236,7 @@ func getValue(numberDataPoint pdata.NumberDataPoint) (float64, error) { // keeps track of metrics with missing values. 
func pushGaugeNumberDataPoint( mi metricInfo, - numberDataPoint pdata.NumberDataPoint, + numberDataPoint pmetric.NumberDataPoint, errs *[]error, sender gaugeSender, settings component.TelemetrySettings, @@ -275,8 +276,8 @@ func newGaugeConsumer( } } -func (g *gaugeConsumer) Type() pdata.MetricDataType { - return pdata.MetricDataTypeGauge +func (g *gaugeConsumer) Type() pmetric.MetricDataType { + return pmetric.MetricDataTypeGauge } func (g *gaugeConsumer) Consume(mi metricInfo, errs *[]error) { @@ -313,13 +314,13 @@ func newSumConsumer( } } -func (s *sumConsumer) Type() pdata.MetricDataType { - return pdata.MetricDataTypeSum +func (s *sumConsumer) Type() pmetric.MetricDataType { + return pmetric.MetricDataTypeSum } func (s *sumConsumer) Consume(mi metricInfo, errs *[]error) { sum := mi.Sum() - isDelta := sum.AggregationTemporality() == pdata.MetricAggregationTemporalityDelta + isDelta := sum.AggregationTemporality() == pmetric.MetricAggregationTemporalityDelta numberDataPoints := sum.DataPoints() for i := 0; i < numberDataPoints.Len(); i++ { // If sum is a delta type, send it to tanzu observability as a @@ -338,7 +339,7 @@ func (s *sumConsumer) PushInternalMetrics(errs *[]error) { s.missingValues.Report(missingValueMetricName, typeIsSumTags, s.sender, errs) } -func (s *sumConsumer) pushNumberDataPoint(mi metricInfo, numberDataPoint pdata.NumberDataPoint, errs *[]error) { +func (s *sumConsumer) pushNumberDataPoint(mi metricInfo, numberDataPoint pmetric.NumberDataPoint, errs *[]error) { tags := attributesToTagsForMetrics(numberDataPoint.Attributes(), mi.SourceKey) value, err := getValue(numberDataPoint) if err != nil { @@ -375,14 +376,14 @@ func (r *histogramReporting) NoAggregationTemporality() int64 { } // LogMalformed logs seeing one malformed data point. 
-func (r *histogramReporting) LogMalformed(metric pdata.Metric) { +func (r *histogramReporting) LogMalformed(metric pmetric.Metric) { namef := zap.String(metricNameString, metric.Name()) r.settings.Logger.Debug("Malformed histogram", namef) r.malformedHistograms.Inc() } // LogNoAggregationTemporality logs seeing a histogram metric with no aggregation temporality -func (r *histogramReporting) LogNoAggregationTemporality(metric pdata.Metric) { +func (r *histogramReporting) LogNoAggregationTemporality(metric pmetric.Metric) { namef := zap.String(metricNameString, metric.Name()) r.settings.Logger.Debug("histogram metric missing aggregation temporality", namef) r.noAggregationTemporality.Inc() @@ -425,7 +426,7 @@ func newHistogramConsumer( } } -func (h *histogramConsumer) Type() pdata.MetricDataType { +func (h *histogramConsumer) Type() pmetric.MetricDataType { return h.spec.Type() } @@ -434,9 +435,9 @@ func (h *histogramConsumer) Consume(mi metricInfo, errs *[]error) { aggregationTemporality := aHistogram.AggregationTemporality() var consumer histogramDataPointConsumer switch aggregationTemporality { - case pdata.MetricAggregationTemporalityDelta: + case pmetric.MetricAggregationTemporalityDelta: consumer = h.delta - case pdata.MetricAggregationTemporalityCumulative: + case pmetric.MetricAggregationTemporalityCumulative: consumer = h.cumulative default: h.reporting.LogNoAggregationTemporality(mi.Metric) @@ -569,15 +570,15 @@ type histogramDataPoint interface { Count() uint64 ExplicitBounds() []float64 BucketCounts() []uint64 - Attributes() pdata.Map - Timestamp() pdata.Timestamp + Attributes() pcommon.Map + Timestamp() pcommon.Timestamp } // histogramMetric represents either a regular or exponential histogram type histogramMetric interface { // AggregationTemporality returns whether the histogram is delta or cumulative - AggregationTemporality() pdata.MetricAggregationTemporality + AggregationTemporality() pmetric.MetricAggregationTemporality // Len returns the number of data points in this histogram Len() int @@ -590,16 +591,16 @@ type histogramMetric interface { type histogramConsumerSpec interface { // Type returns either regular or exponential histogram - Type() pdata.MetricDataType + Type() pmetric.MetricDataType // AsHistogram returns given metric as a regular or exponential histogram depending on // what Type returns. 
- AsHistogram(metric pdata.Metric) histogramMetric + AsHistogram(metric pmetric.Metric) histogramMetric } type regularHistogramMetric struct { - pdata.Histogram - pdata.HistogramDataPointSlice + pmetric.Histogram + pmetric.HistogramDataPointSlice } func (r *regularHistogramMetric) At(i int) histogramDataPoint { @@ -609,11 +610,11 @@ func (r *regularHistogramMetric) At(i int) histogramDataPoint { type regularHistogramConsumerSpec struct { } -func (regularHistogramConsumerSpec) Type() pdata.MetricDataType { - return pdata.MetricDataTypeHistogram +func (regularHistogramConsumerSpec) Type() pmetric.MetricDataType { + return pmetric.MetricDataTypeHistogram } -func (regularHistogramConsumerSpec) AsHistogram(metric pdata.Metric) histogramMetric { +func (regularHistogramConsumerSpec) AsHistogram(metric pmetric.Metric) histogramMetric { aHistogram := metric.Histogram() return &regularHistogramMetric{ Histogram: aHistogram, @@ -634,8 +635,8 @@ func newSummaryConsumer( return &summaryConsumer{sender: sender, settings: settings} } -func (s *summaryConsumer) Type() pdata.MetricDataType { - return pdata.MetricDataTypeSummary +func (s *summaryConsumer) Type() pmetric.MetricDataType { + return pmetric.MetricDataTypeSummary } func (s *summaryConsumer) Consume(mi metricInfo, errs *[]error) { @@ -652,7 +653,7 @@ func (*summaryConsumer) PushInternalMetrics(*[]error) { } func (s *summaryConsumer) sendSummaryDataPoint( - mi metricInfo, summaryDataPoint pdata.SummaryDataPoint, errs *[]error, + mi metricInfo, summaryDataPoint pmetric.SummaryDataPoint, errs *[]error, ) { name := mi.Name() ts := summaryDataPoint.Timestamp().AsTime().Unix() @@ -687,7 +688,7 @@ func (s *summaryConsumer) sendMetric( } } -func attributesToTagsForMetrics(attributes pdata.Map, sourceKey string) map[string]string { +func attributesToTagsForMetrics(attributes pcommon.Map, sourceKey string) map[string]string { tags := attributesToTags(attributes) delete(tags, sourceKey) replaceSource(tags) @@ -699,18 +700,18 @@ func quantileTagValue(quantile float64) string { } type exponentialHistogramDataPoint struct { - pdata.ExponentialHistogramDataPoint + pmetric.ExponentialHistogramDataPoint bucketCounts []uint64 explicitBounds []float64 } -// newExponentialHistogram converts a pdata.ExponentialHistogramDataPoint into a histogramDataPoint +// newExponentialHistogram converts a pmetric.ExponentialHistogramDataPoint into a histogramDataPoint // implementation. A regular histogramDataPoint has bucket counts and explicit bounds for each // bucket; an ExponentialHistogramDataPoint has only bucket counts because the explicit bounds // for each bucket are implied because they grow exponentially from bucket to bucket. The // conversion of an ExponentialHistogramDataPoint to a histogramDataPoint is necessary because the // code that sends histograms to tanzuobservability expects the histogramDataPoint format. -func newExponentialHistogramDataPoint(dataPoint pdata.ExponentialHistogramDataPoint) histogramDataPoint { +func newExponentialHistogramDataPoint(dataPoint pmetric.ExponentialHistogramDataPoint) histogramDataPoint { // Base is the factor by which the explicit bounds increase from bucket to bucket.
// This formula comes from the documentation here: @@ -824,8 +825,8 @@ func (e *exponentialHistogramDataPoint) BucketCounts() []uint64 { } type exponentialHistogramMetric struct { - pdata.ExponentialHistogram - pdata.ExponentialHistogramDataPointSlice + pmetric.ExponentialHistogram + pmetric.ExponentialHistogramDataPointSlice } func (e *exponentialHistogramMetric) At(i int) histogramDataPoint { @@ -835,11 +836,11 @@ func (e *exponentialHistogramMetric) At(i int) histogramDataPoint { type exponentialHistogramConsumerSpec struct { } -func (exponentialHistogramConsumerSpec) Type() pdata.MetricDataType { - return pdata.MetricDataTypeExponentialHistogram +func (exponentialHistogramConsumerSpec) Type() pmetric.MetricDataType { + return pmetric.MetricDataTypeExponentialHistogram } -func (exponentialHistogramConsumerSpec) AsHistogram(metric pdata.Metric) histogramMetric { +func (exponentialHistogramConsumerSpec) AsHistogram(metric pmetric.Metric) histogramMetric { aHistogram := metric.ExponentialHistogram() return &exponentialHistogramMetric{ ExponentialHistogram: aHistogram, diff --git a/exporter/tanzuobservabilityexporter/metrics_exporter.go b/exporter/tanzuobservabilityexporter/metrics_exporter.go index 496220b82581..1f09a624ddab 100644 --- a/exporter/tanzuobservabilityexporter/metrics_exporter.go +++ b/exporter/tanzuobservabilityexporter/metrics_exporter.go @@ -23,7 +23,7 @@ import ( "github.com/wavefronthq/wavefront-sdk-go/senders" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" ) type metricsExporter struct { @@ -80,7 +80,7 @@ func newMetricsExporter(settings component.ExporterCreateSettings, c config.Expo }, nil } -func (e *metricsExporter) pushMetricsData(ctx context.Context, md pdata.Metrics) error { +func (e *metricsExporter) pushMetricsData(ctx context.Context, md pmetric.Metrics) error { return e.consumer.Consume(ctx, md) } diff --git a/exporter/tanzuobservabilityexporter/metrics_exporter_test.go b/exporter/tanzuobservabilityexporter/metrics_exporter_test.go index 0b519329e92f..31fdec04331b 100644 --- a/exporter/tanzuobservabilityexporter/metrics_exporter_test.go +++ b/exporter/tanzuobservabilityexporter/metrics_exporter_test.go @@ -23,7 +23,7 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/exporter/exporterhelper" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" ) func TestPushMetricsData(t *testing.T) { @@ -35,7 +35,7 @@ func TestPushMetricsDataErrorOnSend(t *testing.T) { } func verifyPushMetricsData(t *testing.T, errorOnSend bool) error { - metric := newMetric("test.metric", pdata.MetricDataTypeGauge) + metric := newMetric("test.metric", pmetric.MetricDataTypeGauge) dataPoints := metric.Gauge().DataPoints() dataPoints.EnsureCapacity(1) addDataPoint( @@ -82,7 +82,7 @@ func createMockMetricsExporter( ) } -func consumeMetrics(metrics pdata.Metrics, sender *mockMetricSender) error { +func consumeMetrics(metrics pmetric.Metrics, sender *mockMetricSender) error { ctx := context.Background() mockOTelMetricsExporter, err := createMockMetricsExporter(sender) if err != nil { diff --git a/exporter/tanzuobservabilityexporter/metrics_test.go b/exporter/tanzuobservabilityexporter/metrics_test.go index 8126afbe1133..25633929e4de 100644 --- a/exporter/tanzuobservabilityexporter/metrics_test.go +++ 
b/exporter/tanzuobservabilityexporter/metrics_test.go @@ -23,13 +23,14 @@ import ( "github.com/stretchr/testify/assert" "github.com/wavefronthq/wavefront-sdk-go/histogram" "go.opentelemetry.io/collector/component/componenttest" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" "go.uber.org/zap/zaptest/observer" ) func TestEndToEndGaugeConsumer(t *testing.T) { - gauge := newMetric("gauge", pdata.MetricDataTypeGauge) + gauge := newMetric("gauge", pmetric.MetricDataTypeGauge) dataPoints := gauge.Gauge().DataPoints() dataPoints.EnsureCapacity(1) @@ -80,12 +81,12 @@ func TestEndToEndGaugeConsumer(t *testing.T) { } func TestMetricsConsumerNormal(t *testing.T) { - gauge1 := newMetric("gauge1", pdata.MetricDataTypeGauge) - sum1 := newMetric("sum1", pdata.MetricDataTypeSum) - gauge2 := newMetric("gauge2", pdata.MetricDataTypeGauge) - sum2 := newMetric("sum2", pdata.MetricDataTypeSum) - mockGaugeConsumer := &mockTypedMetricConsumer{typ: pdata.MetricDataTypeGauge} - mockSumConsumer := &mockTypedMetricConsumer{typ: pdata.MetricDataTypeSum} + gauge1 := newMetric("gauge1", pmetric.MetricDataTypeGauge) + sum1 := newMetric("sum1", pmetric.MetricDataTypeSum) + gauge2 := newMetric("gauge2", pmetric.MetricDataTypeGauge) + sum2 := newMetric("sum2", pmetric.MetricDataTypeSum) + mockGaugeConsumer := &mockTypedMetricConsumer{typ: pmetric.MetricDataTypeGauge} + mockSumConsumer := &mockTypedMetricConsumer{typ: pmetric.MetricDataTypeSum} sender := &mockFlushCloser{} metrics := constructMetrics(gauge1, sum1, gauge2, sum2) consumer := newMetricsConsumer( @@ -105,8 +106,8 @@ func TestMetricsConsumerNormal(t *testing.T) { } func TestMetricsConsumerNormalWithSourceTag(t *testing.T) { - sum := newMetric("sum", pdata.MetricDataTypeSum) - mockSumConsumer := &mockTypedMetricConsumer{typ: pdata.MetricDataTypeSum} + sum := newMetric("sum", pmetric.MetricDataTypeSum) + mockSumConsumer := &mockTypedMetricConsumer{typ: pmetric.MetricDataTypeSum} sender := &mockFlushCloser{} tags := map[string]string{"source": "test_source", "test_key": "test_value"} metrics := constructMetricsWithTags(tags, sum) @@ -127,8 +128,8 @@ func TestMetricsConsumerNormalWithSourceTag(t *testing.T) { } func TestMetricsConsumerNormalWithHostnameTag(t *testing.T) { - sum := newMetric("sum", pdata.MetricDataTypeSum) - mockSumConsumer := &mockTypedMetricConsumer{typ: pdata.MetricDataTypeSum} + sum := newMetric("sum", pmetric.MetricDataTypeSum) + mockSumConsumer := &mockTypedMetricConsumer{typ: pmetric.MetricDataTypeSum} sender := &mockFlushCloser{} tags := map[string]string{"host.name": "test_host.name", "hostname": "test_hostname"} metrics := constructMetricsWithTags(tags, sum) @@ -159,8 +160,8 @@ func TestMetricsConsumerNone(t *testing.T) { } func TestNewMetricsConsumerPanicsWithDuplicateMetricType(t *testing.T) { - mockGaugeConsumer1 := &mockTypedMetricConsumer{typ: pdata.MetricDataTypeGauge} - mockGaugeConsumer2 := &mockTypedMetricConsumer{typ: pdata.MetricDataTypeGauge} + mockGaugeConsumer1 := &mockTypedMetricConsumer{typ: pmetric.MetricDataTypeGauge} + mockGaugeConsumer2 := &mockTypedMetricConsumer{typ: pmetric.MetricDataTypeGauge} assert.Panics(t, func() { newMetricsConsumer( @@ -180,7 +181,7 @@ func TestMetricsConsumerPropagatesErrorsOnFlush(t *testing.T) { } func TestMetricsConsumerErrorsWithUnregisteredMetricType(t *testing.T) { - gauge1 := newMetric("gauge1", pdata.MetricDataTypeGauge) + gauge1 := newMetric("gauge1", 
pmetric.MetricDataTypeGauge) metrics := constructMetrics(gauge1) consumer := newMetricsConsumer(nil, nil, true) @@ -188,9 +189,9 @@ func TestMetricsConsumerErrorsWithUnregisteredMetricType(t *testing.T) { } func TestMetricsConsumerErrorConsuming(t *testing.T) { - gauge1 := newMetric("gauge1", pdata.MetricDataTypeGauge) + gauge1 := newMetric("gauge1", pmetric.MetricDataTypeGauge) mockGaugeConsumer := &mockTypedMetricConsumer{ - typ: pdata.MetricDataTypeGauge, + typ: pmetric.MetricDataTypeGauge, errorOnConsume: true} metrics := constructMetrics(gauge1) consumer := newMetricsConsumer( @@ -202,8 +203,8 @@ func TestMetricsConsumerErrorConsuming(t *testing.T) { } func TestMetricsConsumerNoReportingInternalMetrics(t *testing.T) { - gauge1 := newMetric("gauge1", pdata.MetricDataTypeGauge) - mockGaugeConsumer := &mockTypedMetricConsumer{typ: pdata.MetricDataTypeGauge} + gauge1 := newMetric("gauge1", pmetric.MetricDataTypeGauge) + mockGaugeConsumer := &mockTypedMetricConsumer{typ: pmetric.MetricDataTypeGauge} metrics := constructMetrics(gauge1) consumer := newMetricsConsumer( []typedMetricConsumer{mockGaugeConsumer}, nil, false) @@ -213,9 +214,9 @@ func TestMetricsConsumerNoReportingInternalMetrics(t *testing.T) { } func TestMetricsConsumerErrorConsumingInternal(t *testing.T) { - gauge1 := newMetric("gauge1", pdata.MetricDataTypeGauge) + gauge1 := newMetric("gauge1", pmetric.MetricDataTypeGauge) mockGaugeConsumer := &mockTypedMetricConsumer{ - typ: pdata.MetricDataTypeGauge, errorOnPushInternalMetrics: true} + typ: pmetric.MetricDataTypeGauge, errorOnPushInternalMetrics: true} metrics := constructMetrics(gauge1) consumer := newMetricsConsumer( []typedMetricConsumer{mockGaugeConsumer}, nil, true) @@ -227,8 +228,8 @@ func TestMetricsConsumerErrorConsumingInternal(t *testing.T) { func TestMetricsConsumerRespectContext(t *testing.T) { sender := &mockFlushCloser{} - gauge1 := newMetric("gauge1", pdata.MetricDataTypeGauge) - mockGaugeConsumer := &mockTypedMetricConsumer{typ: pdata.MetricDataTypeGauge} + gauge1 := newMetric("gauge1", pmetric.MetricDataTypeGauge) + mockGaugeConsumer := &mockTypedMetricConsumer{typ: pmetric.MetricDataTypeGauge} consumer := newMetricsConsumer( []typedMetricConsumer{mockGaugeConsumer}, sender, true) ctx, cancel := context.WithCancel(context.Background()) @@ -250,7 +251,7 @@ func TestGaugeConsumerErrorSending(t *testing.T) { } func TestGaugeConsumerMissingValue(t *testing.T) { - metric := newMetric("missing.value.metric", pdata.MetricDataTypeGauge) + metric := newMetric("missing.value.metric", pmetric.MetricDataTypeGauge) mi := metricInfo{Metric: metric, Source: "test_source", SourceKey: "host.name"} dataPoints := metric.Gauge().DataPoints() dataPoints.EnsureCapacity(1) @@ -296,10 +297,10 @@ func TestGaugeConsumerMissingValue(t *testing.T) { func TestSumConsumerDelta(t *testing.T) { deltaMetric := newMetric( - "test.delta.metric", pdata.MetricDataTypeSum) + "test.delta.metric", pmetric.MetricDataTypeSum) sum := deltaMetric.Sum() mi := metricInfo{Metric: deltaMetric, Source: "test_source", SourceKey: "host.name"} - sum.SetAggregationTemporality(pdata.MetricAggregationTemporalityDelta) + sum.SetAggregationTemporality(pmetric.MetricAggregationTemporalityDelta) dataPoints := sum.DataPoints() dataPoints.EnsureCapacity(2) addDataPoint( @@ -321,7 +322,7 @@ func TestSumConsumerDelta(t *testing.T) { sender := &mockSumSender{} consumer := newSumConsumer(sender, componenttest.NewNopTelemetrySettings()) - assert.Equal(t, pdata.MetricDataTypeSum, consumer.Type()) + assert.Equal(t, 
pmetric.MetricDataTypeSum, consumer.Type()) var errs []error // delta sums get treated as delta counters @@ -348,10 +349,10 @@ func TestSumConsumerDelta(t *testing.T) { func TestSumConsumerErrorOnSend(t *testing.T) { deltaMetric := newMetric( - "test.delta.metric", pdata.MetricDataTypeSum) + "test.delta.metric", pmetric.MetricDataTypeSum) sum := deltaMetric.Sum() mi := metricInfo{Metric: deltaMetric, Source: "test_source", SourceKey: "host.name"} - sum.SetAggregationTemporality(pdata.MetricAggregationTemporalityDelta) + sum.SetAggregationTemporality(pmetric.MetricAggregationTemporalityDelta) dataPoints := sum.DataPoints() dataPoints.EnsureCapacity(2) addDataPoint( @@ -373,7 +374,7 @@ func TestSumConsumerErrorOnSend(t *testing.T) { sender := &mockSumSender{errorOnSend: true} consumer := newSumConsumer(sender, componenttest.NewNopTelemetrySettings()) - assert.Equal(t, pdata.MetricDataTypeSum, consumer.Type()) + assert.Equal(t, pmetric.MetricDataTypeSum, consumer.Type()) var errs []error // delta sums get treated as delta counters @@ -384,10 +385,10 @@ func TestSumConsumerErrorOnSend(t *testing.T) { func TestSumConsumerCumulative(t *testing.T) { cumulativeMetric := newMetric( - "test.cumulative.metric", pdata.MetricDataTypeSum) + "test.cumulative.metric", pmetric.MetricDataTypeSum) sum := cumulativeMetric.Sum() mi := metricInfo{Metric: cumulativeMetric, Source: "test_source", SourceKey: "host.name"} - sum.SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + sum.SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) dataPoints := sum.DataPoints() dataPoints.EnsureCapacity(1) addDataPoint( @@ -400,7 +401,7 @@ func TestSumConsumerCumulative(t *testing.T) { ) sender := &mockSumSender{} consumer := newSumConsumer(sender, componenttest.NewNopTelemetrySettings()) - assert.Equal(t, pdata.MetricDataTypeSum, consumer.Type()) + assert.Equal(t, pmetric.MetricDataTypeSum, consumer.Type()) var errs []error // cumulative sums get treated as regular wavefront metrics @@ -422,10 +423,10 @@ func TestSumConsumerCumulative(t *testing.T) { func TestSumConsumerUnspecified(t *testing.T) { cumulativeMetric := newMetric( - "test.unspecified.metric", pdata.MetricDataTypeSum) + "test.unspecified.metric", pmetric.MetricDataTypeSum) sum := cumulativeMetric.Sum() mi := metricInfo{Metric: cumulativeMetric, Source: "test_source", SourceKey: "host.name"} - sum.SetAggregationTemporality(pdata.MetricAggregationTemporalityUnspecified) + sum.SetAggregationTemporality(pmetric.MetricAggregationTemporalityUnspecified) dataPoints := sum.DataPoints() dataPoints.EnsureCapacity(1) addDataPoint( @@ -438,7 +439,7 @@ func TestSumConsumerUnspecified(t *testing.T) { ) sender := &mockSumSender{} consumer := newSumConsumer(sender, componenttest.NewNopTelemetrySettings()) - assert.Equal(t, pdata.MetricDataTypeSum, consumer.Type()) + assert.Equal(t, pmetric.MetricDataTypeSum, consumer.Type()) var errs []error // unspecified sums get treated as regular wavefront metrics @@ -459,10 +460,10 @@ func TestSumConsumerUnspecified(t *testing.T) { } func TestSumConsumerMissingValue(t *testing.T) { - metric := newMetric("missing.value.metric", pdata.MetricDataTypeSum) + metric := newMetric("missing.value.metric", pmetric.MetricDataTypeSum) sum := metric.Sum() mi := metricInfo{Metric: metric, Source: "test_source", SourceKey: "host.name"} - sum.SetAggregationTemporality(pdata.MetricAggregationTemporalityDelta) + sum.SetAggregationTemporality(pmetric.MetricAggregationTemporalityDelta) dataPoints := sum.DataPoints() 
dataPoints.EnsureCapacity(1) addDataPoint( @@ -501,7 +502,7 @@ func TestHistogramConsumerDeltaAggregation(t *testing.T) { countAttributeForEachDataPoint := []uint64{2, 5, 10} deltaMetric := newHistogramMetricWithDataPoints( "delta.metric", - pdata.MetricAggregationTemporalityDelta, + pmetric.MetricAggregationTemporalityDelta, countAttributeForEachDataPoint) mi := metricInfo{Metric: deltaMetric, Source: "test_source", SourceKey: "host.name"} sender := &mockGaugeSender{} @@ -533,7 +534,7 @@ func TestHistogramConsumerCumulativeAggregation(t *testing.T) { countAttributeForEachDataPoint := []uint64{2, 5, 10} cumulativeMetric := newHistogramMetricWithDataPoints( "cumulative.metric", - pdata.MetricAggregationTemporalityCumulative, + pmetric.MetricAggregationTemporalityCumulative, countAttributeForEachDataPoint) mi := metricInfo{Metric: cumulativeMetric, Source: "test_source", SourceKey: "host.name"} sender := &mockGaugeSender{} @@ -569,7 +570,7 @@ func TestHistogramConsumerNoAggregation(t *testing.T) { // Create a histogram metric with missing aggregation attribute metric := newHistogramMetricWithDataPoints( "missing.aggregation.metric", - pdata.MetricAggregationTemporalityUnspecified, + pmetric.MetricAggregationTemporalityUnspecified, nil) mi := metricInfo{Metric: metric, Source: "test_source", SourceKey: "host.name"} sender := &mockGaugeSender{} @@ -583,7 +584,7 @@ func TestHistogramConsumerNoAggregation(t *testing.T) { regularHistogram, settings, ) - assert.Equal(t, pdata.MetricDataTypeHistogram, consumer.Type()) + assert.Equal(t, pmetric.MetricDataTypeHistogram, consumer.Type()) var errs []error expectedNoAggregationCount := 3 for i := 0; i < expectedNoAggregationCount; i++ { @@ -606,7 +607,7 @@ func TestHistogramReporting(t *testing.T) { settings := componenttest.NewNopTelemetrySettings() settings.Logger = zap.New(observedZapCore) report := newHistogramReporting(settings) - metric := newMetric("a.metric", pdata.MetricDataTypeHistogram) + metric := newMetric("a.metric", pmetric.MetricDataTypeHistogram) malformedCount := 3 for i := 0; i < malformedCount; i++ { report.LogMalformed(metric) @@ -657,8 +658,8 @@ func TestHistogramReportingError(t *testing.T) { } func TestCumulativeHistogramDataPointConsumer(t *testing.T) { - metric := newMetric("a.metric", pdata.MetricDataTypeHistogram) - histogramDataPoint := pdata.NewHistogramDataPoint() + metric := newMetric("a.metric", pmetric.MetricDataTypeHistogram) + histogramDataPoint := pmetric.NewHistogramDataPoint() mi := metricInfo{Metric: metric, Source: "test_source", SourceKey: "host.name"} // Creates bounds of -Inf to <=2.0; >2.0 to <=5.0; >5.0 to <=10.0; >10.0 to +Inf histogramDataPoint.SetExplicitBounds([]float64{2.0, 5.0, 10.0}) @@ -705,8 +706,8 @@ func TestCumulativeHistogramDataPointConsumer(t *testing.T) { } func TestCumulativeHistogramDataPointConsumerError(t *testing.T) { - metric := newMetric("a.metric", pdata.MetricDataTypeHistogram) - histogramDataPoint := pdata.NewHistogramDataPoint() + metric := newMetric("a.metric", pmetric.MetricDataTypeHistogram) + histogramDataPoint := pmetric.NewHistogramDataPoint() mi := metricInfo{Metric: metric, Source: "test_source", SourceKey: "host.name"} // Creates bounds of -Inf to <=2.0; >2.0 to <=5.0; >5.0 to <=10.0; >10.0 to +Inf histogramDataPoint.SetExplicitBounds([]float64{2.0, 5.0, 10.0}) @@ -723,9 +724,9 @@ func TestCumulativeHistogramDataPointConsumerError(t *testing.T) { } func TestCumulativeHistogramDataPointConsumerLeInUse(t *testing.T) { - metric := newMetric("a.metric", 
pdata.MetricDataTypeHistogram) + metric := newMetric("a.metric", pmetric.MetricDataTypeHistogram) mi := metricInfo{Metric: metric, Source: "test_source", SourceKey: "host.name"} - histogramDataPoint := pdata.NewHistogramDataPoint() + histogramDataPoint := pmetric.NewHistogramDataPoint() histogramDataPoint.SetExplicitBounds([]float64{10.0}) histogramDataPoint.SetBucketCounts([]uint64{4, 12}) histogramDataPoint.Attributes().UpsertInt("le", 8) @@ -758,9 +759,9 @@ func TestCumulativeHistogramDataPointConsumerLeInUse(t *testing.T) { } func TestCumulativeHistogramDataPointConsumerMissingBuckets(t *testing.T) { - metric := newMetric("a.metric", pdata.MetricDataTypeHistogram) + metric := newMetric("a.metric", pmetric.MetricDataTypeHistogram) mi := metricInfo{Metric: metric, Source: "test_source", SourceKey: "host.name"} - histogramDataPoint := pdata.NewHistogramDataPoint() + histogramDataPoint := pmetric.NewHistogramDataPoint() sender := &mockGaugeSender{} report := newHistogramReporting(componenttest.NewNopTelemetrySettings()) consumer := newCumulativeHistogramDataPointConsumer(sender) @@ -774,8 +775,8 @@ func TestCumulativeHistogramDataPointConsumerMissingBuckets(t *testing.T) { } func TestDeltaHistogramDataPointConsumer(t *testing.T) { - metric := newMetric("a.delta.histogram", pdata.MetricDataTypeHistogram) - histogramDataPoint := pdata.NewHistogramDataPoint() + metric := newMetric("a.delta.histogram", pmetric.MetricDataTypeHistogram) + histogramDataPoint := pmetric.NewHistogramDataPoint() mi := metricInfo{Metric: metric, Source: "test_source", SourceKey: "host.name"} // Creates bounds of -Inf to <=2.0; >2.0 to <=5.0; >5.0 to <=10.0; >10.0 to +Inf histogramDataPoint.SetExplicitBounds([]float64{2.0, 5.0, 10.0}) @@ -813,8 +814,8 @@ func TestDeltaHistogramDataPointConsumer(t *testing.T) { } func TestDeltaHistogramDataPointConsumer_OneBucket(t *testing.T) { - metric := newMetric("one.bucket.delta.histogram", pdata.MetricDataTypeHistogram) - histogramDataPoint := pdata.NewHistogramDataPoint() + metric := newMetric("one.bucket.delta.histogram", pmetric.MetricDataTypeHistogram) + histogramDataPoint := pmetric.NewHistogramDataPoint() mi := metricInfo{Metric: metric, Source: "test_source", SourceKey: "host.name"} // Creates bounds of -Inf to <=2.0; >2.0 to <=5.0; >5.0 to <=10.0; >10.0 to +Inf histogramDataPoint.SetExplicitBounds([]float64{}) @@ -846,8 +847,8 @@ func TestDeltaHistogramDataPointConsumer_OneBucket(t *testing.T) { } func TestDeltaHistogramDataPointConsumerError(t *testing.T) { - metric := newMetric("a.delta.histogram", pdata.MetricDataTypeHistogram) - histogramDataPoint := pdata.NewHistogramDataPoint() + metric := newMetric("a.delta.histogram", pmetric.MetricDataTypeHistogram) + histogramDataPoint := pmetric.NewHistogramDataPoint() mi := metricInfo{Metric: metric, Source: "test_source", SourceKey: "host.name"} // Creates bounds of -Inf to <=2.0; >2.0 to <=5.0; >5.0 to <=10.0; >10.0 to +Inf histogramDataPoint.SetExplicitBounds([]float64{2.0, 5.0, 10.0}) @@ -863,9 +864,9 @@ func TestDeltaHistogramDataPointConsumerError(t *testing.T) { } func TestDeltaHistogramDataPointConsumerMissingBuckets(t *testing.T) { - metric := newMetric("a.metric", pdata.MetricDataTypeHistogram) + metric := newMetric("a.metric", pmetric.MetricDataTypeHistogram) mi := metricInfo{Metric: metric, Source: "test_source", SourceKey: "host.name"} - histogramDataPoint := pdata.NewHistogramDataPoint() + histogramDataPoint := pmetric.NewHistogramDataPoint() sender := &mockDistributionSender{} report := 
newHistogramReporting(componenttest.NewNopTelemetrySettings()) consumer := newDeltaHistogramDataPointConsumer(sender) @@ -879,7 +880,7 @@ func TestDeltaHistogramDataPointConsumerMissingBuckets(t *testing.T) { } func TestSummaries(t *testing.T) { - summaryMetric := newMetric("test.summary", pdata.MetricDataTypeSummary) + summaryMetric := newMetric("test.summary", pmetric.MetricDataTypeSummary) summary := summaryMetric.Summary() dataPoints := summary.DataPoints() dataPoints.EnsureCapacity(2) @@ -902,7 +903,7 @@ func TestSummaries(t *testing.T) { sender := &mockGaugeSender{} consumer := newSummaryConsumer(sender, componenttest.NewNopTelemetrySettings()) - assert.Equal(t, pdata.MetricDataTypeSummary, consumer.Type()) + assert.Equal(t, pmetric.MetricDataTypeSummary, consumer.Type()) var errs []error consumer.Consume(mi, &errs) @@ -999,7 +1000,7 @@ func TestSummaries(t *testing.T) { } func TestSummaries_QuantileTagExists(t *testing.T) { - summaryMetric := newMetric("test.summary.quantile.tag", pdata.MetricDataTypeSummary) + summaryMetric := newMetric("test.summary.quantile.tag", pmetric.MetricDataTypeSummary) summary := summaryMetric.Summary() dataPoints := summary.DataPoints() dataPoints.EnsureCapacity(1) @@ -1045,7 +1046,7 @@ func TestSummaries_QuantileTagExists(t *testing.T) { } func TestSummariesConsumer_ErrorSending(t *testing.T) { - summaryMetric := newMetric("test.summary.error", pdata.MetricDataTypeSummary) + summaryMetric := newMetric("test.summary.error", pmetric.MetricDataTypeSummary) summary := summaryMetric.Summary() mi := metricInfo{Metric: summaryMetric, Source: "test_source", SourceKey: "host.name"} dataPoints := summary.DataPoints() @@ -1063,7 +1064,7 @@ func TestSummariesConsumer_ErrorSending(t *testing.T) { } // Sets quantile values for a summary data point -func setQuantileValues(dataPoint pdata.SummaryDataPoint, quantileValues ...float64) { +func setQuantileValues(dataPoint pmetric.SummaryDataPoint, quantileValues ...float64) { if len(quantileValues)%2 != 0 { panic("quantileValues must be quantile, value, quantile, value, ...") } @@ -1079,10 +1080,10 @@ func setQuantileValues(dataPoint pdata.SummaryDataPoint, quantileValues ...float func TestExponentialHistogramConsumerSpec(t *testing.T) { metric := newExponentialHistogramMetricWithDataPoints( - "a.metric", pdata.MetricAggregationTemporalityDelta, []uint64{4, 7, 11}) - assert.Equal(t, pdata.MetricDataTypeExponentialHistogram, exponentialHistogram.Type()) + "a.metric", pmetric.MetricAggregationTemporalityDelta, []uint64{4, 7, 11}) + assert.Equal(t, pmetric.MetricDataTypeExponentialHistogram, exponentialHistogram.Type()) aHistogram := exponentialHistogram.AsHistogram(metric) - assert.Equal(t, pdata.MetricAggregationTemporalityDelta, aHistogram.AggregationTemporality()) + assert.Equal(t, pmetric.MetricAggregationTemporalityDelta, aHistogram.AggregationTemporality()) assert.Equal(t, 3, aHistogram.Len()) assert.Equal(t, uint64(4), aHistogram.At(0).Count()) assert.Equal(t, uint64(7), aHistogram.At(1).Count()) @@ -1090,7 +1091,7 @@ func TestExponentialHistogramConsumerSpec(t *testing.T) { } func TestExponentialHistogramDataPoint(t *testing.T) { - dataPoint := pdata.NewExponentialHistogramDataPoint() + dataPoint := pmetric.NewExponentialHistogramDataPoint() dataPoint.SetScale(1) dataPoint.Negative().SetOffset(6) dataPoint.Negative().SetBucketCounts([]uint64{15, 16, 17}) @@ -1112,7 +1113,7 @@ func TestExponentialHistogramDataPoint(t *testing.T) { } func TestExponentialHistogramDataPoint_ZeroOnly(t *testing.T) { - dataPoint := 
pdata.NewExponentialHistogramDataPoint() + dataPoint := pmetric.NewExponentialHistogramDataPoint() dataPoint.SetScale(0) dataPoint.Negative().SetOffset(2) dataPoint.Positive().SetOffset(1) @@ -1155,10 +1156,10 @@ func TestAttributesToTagsForMetrics(t *testing.T) { // data point. func newHistogramMetricWithDataPoints( name string, - temporality pdata.MetricAggregationTemporality, + temporality pmetric.MetricAggregationTemporality, countAttributeForEachDataPoint []uint64, -) pdata.Metric { - result := newMetric(name, pdata.MetricDataTypeHistogram) +) pmetric.Metric { + result := newMetric(name, pmetric.MetricDataTypeHistogram) aHistogram := result.Histogram() aHistogram.SetAggregationTemporality(temporality) aHistogram.DataPoints().EnsureCapacity(len(countAttributeForEachDataPoint)) @@ -1171,10 +1172,10 @@ func newHistogramMetricWithDataPoints( // Works like newHistogramMetricWithDataPoints but creates an exponential histogram metric func newExponentialHistogramMetricWithDataPoints( name string, - temporality pdata.MetricAggregationTemporality, + temporality pmetric.MetricAggregationTemporality, countAttributeForEachDataPoint []uint64, -) pdata.Metric { - result := newMetric(name, pdata.MetricDataTypeExponentialHistogram) +) pmetric.Metric { + result := newMetric(name, pmetric.MetricDataTypeExponentialHistogram) aHistogram := result.ExponentialHistogram() aHistogram.SetAggregationTemporality(temporality) aHistogram.DataPoints().EnsureCapacity(len(countAttributeForEachDataPoint)) @@ -1185,7 +1186,7 @@ func newExponentialHistogramMetricWithDataPoints( } func verifyGaugeConsumer(t *testing.T, errorOnSend bool) { - metric := newMetric("test.metric", pdata.MetricDataTypeGauge) + metric := newMetric("test.metric", pmetric.MetricDataTypeGauge) mi := metricInfo{Metric: metric, Source: "test_source", SourceKey: "host.name"} dataPoints := metric.Gauge().DataPoints() dataPoints.EnsureCapacity(2) @@ -1220,7 +1221,7 @@ func verifyGaugeConsumer(t *testing.T, errorOnSend bool) { sender := &mockGaugeSender{errorOnSend: errorOnSend} consumer := newGaugeConsumer(sender, componenttest.NewNopTelemetrySettings()) - assert.Equal(t, pdata.MetricDataTypeGauge, consumer.Type()) + assert.Equal(t, pmetric.MetricDataTypeGauge, consumer.Type()) var errs []error consumer.Consume(mi, &errs) assert.ElementsMatch(t, expected, sender.metrics) @@ -1231,8 +1232,8 @@ func verifyGaugeConsumer(t *testing.T, errorOnSend bool) { } } -func constructMetrics(metricList ...pdata.Metric) pdata.Metrics { - result := pdata.NewMetrics() +func constructMetrics(metricList ...pmetric.Metric) pmetric.Metrics { + result := pmetric.NewMetrics() result.ResourceMetrics().EnsureCapacity(1) rm := result.ResourceMetrics().AppendEmpty() rm.ScopeMetrics().EnsureCapacity(1) @@ -1244,8 +1245,8 @@ func constructMetrics(metricList ...pdata.Metric) pdata.Metrics { return result } -func constructMetricsWithTags(tags map[string]string, metricList ...pdata.Metric) pdata.Metrics { - result := pdata.NewMetrics() +func constructMetricsWithTags(tags map[string]string, metricList ...pmetric.Metric) pmetric.Metrics { + result := pmetric.NewMetrics() result.ResourceMetrics().EnsureCapacity(1) rm := result.ResourceMetrics().AppendEmpty() for key, val := range tags { @@ -1260,8 +1261,8 @@ func constructMetricsWithTags(tags map[string]string, metricList ...pdata.Metric return result } -func newMetric(name string, typ pdata.MetricDataType) pdata.Metric { - result := pdata.NewMetric() +func newMetric(name string, typ pmetric.MetricDataType) pmetric.Metric { + result := 
pmetric.NewMetric() result.SetName(name) result.SetDataType(typ) return result @@ -1271,26 +1272,26 @@ func addDataPoint( value interface{}, ts int64, tags map[string]interface{}, - slice pdata.NumberDataPointSlice, + slice pmetric.NumberDataPointSlice, ) { dataPoint := slice.AppendEmpty() if value != nil { setDataPointValue(value, dataPoint) } setDataPointTimestamp(ts, dataPoint) - pdata.NewMapFromRaw(tags).CopyTo(dataPoint.Attributes()) + pcommon.NewMapFromRaw(tags).CopyTo(dataPoint.Attributes()) } type dataPointWithTimestamp interface { - SetTimestamp(v pdata.Timestamp) + SetTimestamp(v pcommon.Timestamp) } func setDataPointTimestamp(ts int64, dataPoint dataPointWithTimestamp) { dataPoint.SetTimestamp( - pdata.NewTimestampFromTime(time.Unix(ts, 0))) + pcommon.NewTimestampFromTime(time.Unix(ts, 0))) } -func setDataPointValue(value interface{}, dataPoint pdata.NumberDataPoint) { +func setDataPointValue(value interface{}, dataPoint pmetric.NumberDataPoint) { switch v := value.(type) { case int: dataPoint.SetIntVal(int64(v)) @@ -1369,7 +1370,7 @@ func (m *mockDistributionSender) SendDistribution( } type mockTypedMetricConsumer struct { - typ pdata.MetricDataType + typ pmetric.MetricDataType errorOnConsume bool errorOnPushInternalMetrics bool names []string @@ -1378,7 +1379,7 @@ type mockTypedMetricConsumer struct { pushInternalMetricsCallCount int } -func (m *mockTypedMetricConsumer) Type() pdata.MetricDataType { +func (m *mockTypedMetricConsumer) Type() pmetric.MetricDataType { return m.typ } diff --git a/exporter/tanzuobservabilityexporter/transformer.go b/exporter/tanzuobservabilityexporter/transformer.go index 7849dec4eee7..9a77ddc2b670 100644 --- a/exporter/tanzuobservabilityexporter/transformer.go +++ b/exporter/tanzuobservabilityexporter/transformer.go @@ -21,17 +21,18 @@ import ( "github.com/google/uuid" "github.com/wavefronthq/wavefront-sdk-go/senders" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/tracetranslator" ) type traceTransformer struct { - resAttrs pdata.Map + resAttrs pcommon.Map } -func newTraceTransformer(resource pdata.Resource) *traceTransformer { +func newTraceTransformer(resource pcommon.Resource) *traceTransformer { t := &traceTransformer{ resAttrs: resource.Attributes(), } @@ -55,7 +56,7 @@ type span struct { Source string } -func (t *traceTransformer) Span(orig pdata.Span) (span, error) { +func (t *traceTransformer) Span(orig ptrace.Span) (span, error) { traceID, err := traceIDtoUUID(orig.TraceID()) if err != nil { return span{}, errInvalidTraceID @@ -109,10 +110,10 @@ func (t *traceTransformer) Span(orig pdata.Span) (span, error) { }, nil } -func getSourceAndResourceTagsAndSourceKey(attributes pdata.Map) ( +func getSourceAndResourceTagsAndSourceKey(attributes pcommon.Map) ( string, map[string]string, string) { attributesWithoutSource := map[string]string{} - attributes.Range(func(k string, v pdata.Value) bool { + attributes.Range(func(k string, v pcommon.Value) bool { attributesWithoutSource[k] = v.AsString() return true }) @@ -132,29 +133,29 @@ func getSourceAndResourceTagsAndSourceKey(attributes pdata.Map) ( return source, attributesWithoutSource, sourceKey } -func getSourceAndResourceTags(attributes pdata.Map) (string, map[string]string) { +func getSourceAndResourceTags(attributes pcommon.Map) 
(string, map[string]string) { source, attributesWithoutSource, _ := getSourceAndResourceTagsAndSourceKey(attributes) return source, attributesWithoutSource } -func getSourceAndKey(attributes pdata.Map) (string, string) { +func getSourceAndKey(attributes pcommon.Map) (string, string) { source, _, sourceKey := getSourceAndResourceTagsAndSourceKey(attributes) return source, sourceKey } -func spanKind(span pdata.Span) string { +func spanKind(span ptrace.Span) string { switch span.Kind() { - case pdata.SpanKindClient: + case ptrace.SpanKindClient: return "client" - case pdata.SpanKindServer: + case ptrace.SpanKindServer: return "server" - case pdata.SpanKindProducer: + case ptrace.SpanKindProducer: return "producer" - case pdata.SpanKindConsumer: + case ptrace.SpanKindConsumer: return "consumer" - case pdata.SpanKindInternal: + case ptrace.SpanKindInternal: return "internal" - case pdata.SpanKindUnspecified: + case ptrace.SpanKindUnspecified: return "unspecified" default: return "unknown" @@ -175,7 +176,7 @@ func (t *traceTransformer) setRequiredTags(tags map[string]string) { } } -func eventsToLogs(events pdata.SpanEventSlice) []senders.SpanLog { +func eventsToLogs(events ptrace.SpanEventSlice) []senders.SpanLog { var result []senders.SpanLog for i := 0; i < events.Len(); i++ { e := events.At(i) @@ -190,7 +191,7 @@ func eventsToLogs(events pdata.SpanEventSlice) []senders.SpanLog { return result } -func calculateTimes(span pdata.Span) (int64, int64) { +func calculateTimes(span ptrace.Span) (int64, int64) { startMillis := int64(span.StartTimestamp()) / time.Millisecond.Nanoseconds() endMillis := int64(span.EndTimestamp()) / time.Millisecond.Nanoseconds() durationMillis := endMillis - startMillis @@ -201,10 +202,10 @@ func calculateTimes(span pdata.Span) (int64, int64) { return startMillis, durationMillis } -func attributesToTags(attributes ...pdata.Map) map[string]string { +func attributesToTags(attributes ...pcommon.Map) map[string]string { tags := map[string]string{} for _, att := range attributes { - att.Range(func(k string, v pdata.Value) bool { + att.Range(func(k string, v pcommon.Value) bool { tags[k] = v.AsString() return true }) @@ -219,24 +220,24 @@ func replaceSource(tags map[string]string) { } } -func attributesToTagsReplaceSource(attributes ...pdata.Map) map[string]string { +func attributesToTagsReplaceSource(attributes ...pcommon.Map) map[string]string { tags := attributesToTags(attributes...) 
replaceSource(tags) return tags } -func newMap(tags map[string]string) pdata.Map { +func newMap(tags map[string]string) pcommon.Map { valueMap := make(map[string]interface{}, len(tags)) for key, value := range tags { valueMap[key] = value } - return pdata.NewMapFromRaw(valueMap) + return pcommon.NewMapFromRaw(valueMap) } -func errorTagsFromStatus(status pdata.SpanStatus) map[string]string { +func errorTagsFromStatus(status ptrace.SpanStatus) map[string]string { tags := make(map[string]string) - if status.Code() != pdata.StatusCodeError { + if status.Code() != ptrace.StatusCodeError { return tags } @@ -253,7 +254,7 @@ func errorTagsFromStatus(status pdata.SpanStatus) map[string]string { return tags } -func traceIDtoUUID(id pdata.TraceID) (uuid.UUID, error) { +func traceIDtoUUID(id pcommon.TraceID) (uuid.UUID, error) { formatted, err := uuid.Parse(id.HexString()) if err != nil || id.IsEmpty() { return uuid.Nil, errInvalidTraceID @@ -261,7 +262,7 @@ func traceIDtoUUID(id pdata.TraceID) (uuid.UUID, error) { return formatted, nil } -func spanIDtoUUID(id pdata.SpanID) (uuid.UUID, error) { +func spanIDtoUUID(id pcommon.SpanID) (uuid.UUID, error) { formatted, err := uuid.FromBytes(padTo16Bytes(id.Bytes())) if err != nil || id.IsEmpty() { return uuid.Nil, errInvalidSpanID @@ -269,7 +270,7 @@ func spanIDtoUUID(id pdata.SpanID) (uuid.UUID, error) { return formatted, nil } -func parentSpanIDtoUUID(id pdata.SpanID) uuid.UUID { +func parentSpanIDtoUUID(id pcommon.SpanID) uuid.UUID { if id.IsEmpty() { return uuid.Nil } diff --git a/exporter/tanzuobservabilityexporter/transformer_test.go b/exporter/tanzuobservabilityexporter/transformer_test.go index 95806ed8bdfe..87d37cdbc911 100644 --- a/exporter/tanzuobservabilityexporter/transformer_test.go +++ b/exporter/tanzuobservabilityexporter/transformer_test.go @@ -20,18 +20,19 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" ) func TestSpanStartTimeIsConvertedToMilliseconds(t *testing.T) { inNanos := int64(50000000) - att := pdata.NewMap() + att := pcommon.NewMap() transform := transformerFromAttributes(att) - span := pdata.NewSpan() - span.SetSpanID(pdata.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 1})) - span.SetTraceID(pdata.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) - span.SetStartTimestamp(pdata.Timestamp(inNanos)) + span := ptrace.NewSpan() + span.SetSpanID(pcommon.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 1})) + span.SetTraceID(pcommon.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) + span.SetStartTimestamp(pcommon.Timestamp(inNanos)) actual, err := transform.Span(span) require.NoError(t, err, "transforming span to wavefront format") @@ -42,13 +43,13 @@ func TestSpanStartTimeIsConvertedToMilliseconds(t *testing.T) { func TestSpanDurationIsCalculatedFromStartAndEndTimes(t *testing.T) { startNanos := int64(50000000) endNanos := int64(60000000) - att := pdata.NewMap() + att := pcommon.NewMap() transform := transformerFromAttributes(att) - span := pdata.NewSpan() - span.SetSpanID(pdata.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 1})) - span.SetTraceID(pdata.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) - span.SetStartTimestamp(pdata.Timestamp(startNanos)) - span.SetEndTimestamp(pdata.Timestamp(endNanos)) + span := ptrace.NewSpan() + 
span.SetSpanID(pcommon.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 1})) + span.SetTraceID(pcommon.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) + span.SetStartTimestamp(pcommon.Timestamp(startNanos)) + span.SetEndTimestamp(pcommon.Timestamp(endNanos)) actual, err := transform.Span(span) require.NoError(t, err, "transforming span to wavefront format") @@ -58,12 +59,12 @@ func TestSpanDurationIsCalculatedFromStartAndEndTimes(t *testing.T) { func TestSpanDurationIsZeroIfEndTimeIsUnset(t *testing.T) { startNanos := int64(50000000) - att := pdata.NewMap() + att := pcommon.NewMap() transform := transformerFromAttributes(att) - span := pdata.NewSpan() - span.SetSpanID(pdata.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 1})) - span.SetTraceID(pdata.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) - span.SetStartTimestamp(pdata.Timestamp(startNanos)) + span := ptrace.NewSpan() + span.SetSpanID(pcommon.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 1})) + span.SetTraceID(pcommon.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) + span.SetStartTimestamp(pcommon.Timestamp(startNanos)) actual, err := transform.Span(span) require.NoError(t, err, "transforming span to wavefront format") @@ -72,8 +73,8 @@ func TestSpanDurationIsZeroIfEndTimeIsUnset(t *testing.T) { } func TestSpanStatusCodeErrorAddsErrorTag(t *testing.T) { - transform := transformerFromAttributes(pdata.NewMap()) - actual, err := transform.Span(spanWithStatus(pdata.StatusCodeError, "")) + transform := transformerFromAttributes(pcommon.NewMap()) + actual, err := transform.Span(spanWithStatus(ptrace.StatusCodeError, "")) require.NoError(t, err, "transforming span to wavefront format") errorTag, ok := actual.Tags["error"] @@ -82,8 +83,8 @@ func TestSpanStatusCodeErrorAddsErrorTag(t *testing.T) { } func TestSpanStatusCodeOkDoesNotAddErrorTag(t *testing.T) { - transform := transformerFromAttributes(pdata.NewMap()) - actual, err := transform.Span(spanWithStatus(pdata.StatusCodeOk, "")) + transform := transformerFromAttributes(pcommon.NewMap()) + actual, err := transform.Span(spanWithStatus(ptrace.StatusCodeOk, "")) require.NoError(t, err, "transforming span to wavefront format") _, ok := actual.Tags["error"] @@ -91,8 +92,8 @@ func TestSpanStatusCodeOkDoesNotAddErrorTag(t *testing.T) { } func TestSpanStatusCodeUnsetDoesNotAddErrorTag(t *testing.T) { - transform := transformerFromAttributes(pdata.NewMap()) - actual, err := transform.Span(spanWithStatus(pdata.StatusCodeUnset, "")) + transform := transformerFromAttributes(pcommon.NewMap()) + actual, err := transform.Span(spanWithStatus(ptrace.StatusCodeUnset, "")) require.NoError(t, err, "transforming span to wavefront format") _, ok := actual.Tags["error"] @@ -100,9 +101,9 @@ func TestSpanStatusCodeUnsetDoesNotAddErrorTag(t *testing.T) { } func TestSpanStatusMessageIsConvertedToTag(t *testing.T) { - transform := transformerFromAttributes(pdata.NewMap()) + transform := transformerFromAttributes(pcommon.NewMap()) message := "some error message" - actual, err := transform.Span(spanWithStatus(pdata.StatusCodeError, message)) + actual, err := transform.Span(spanWithStatus(ptrace.StatusCodeError, message)) require.NoError(t, err, "transforming span to wavefront format") @@ -112,8 +113,8 @@ func TestSpanStatusMessageIsConvertedToTag(t *testing.T) { } func TestSpanStatusMessageIsIgnoredIfStatusIsNotError(t *testing.T) { - transform := transformerFromAttributes(pdata.NewMap()) - actual, err := transform.Span(spanWithStatus(pdata.StatusCodeOk, "not a real error 
message")) + transform := transformerFromAttributes(pcommon.NewMap()) + actual, err := transform.Span(spanWithStatus(ptrace.StatusCodeOk, "not a real error message")) require.NoError(t, err, "transforming span to wavefront format") @@ -128,11 +129,11 @@ func TestSpanStatusMessageIsTruncatedToValidLength(t *testing.T) { * Keep the number of distinct time series per metric and host to under 1000. * -- https://docs.wavefront.com/wavefront_data_format.html */ - transform := transformerFromAttributes(pdata.NewMap()) + transform := transformerFromAttributes(pcommon.NewMap()) message := "1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890" message += "1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890" message += "1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890" - actual, err := transform.Span(spanWithStatus(pdata.StatusCodeError, message)) + actual, err := transform.Span(spanWithStatus(ptrace.StatusCodeError, message)) require.NoError(t, err, "transforming span to wavefront format") @@ -142,15 +143,15 @@ func TestSpanStatusMessageIsTruncatedToValidLength(t *testing.T) { } func TestSpanEventsAreTranslatedToSpanLogs(t *testing.T) { - transform := transformerFromAttributes(pdata.NewMap()) + transform := transformerFromAttributes(pcommon.NewMap()) now := time.Now() - span := pdata.NewSpan() - span.SetSpanID(pdata.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 1})) - span.SetTraceID(pdata.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) - event := pdata.NewSpanEvent() + span := ptrace.NewSpan() + span.SetSpanID(pcommon.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 1})) + span.SetTraceID(pcommon.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) + event := ptrace.NewSpanEvent() event.SetName("eventName") - event.SetTimestamp(pdata.NewTimestampFromTime(now)) - eventAttrs := pdata.NewMap() + event.SetTimestamp(pcommon.NewTimestampFromTime(now)) + eventAttrs := pcommon.NewMap() eventAttrs.InsertString("attrKey", "attrVal") eventAttrs.CopyTo(event.Attributes()) event.CopyTo(span.Events().AppendEmpty()) @@ -170,39 +171,39 @@ func TestSpanEventsAreTranslatedToSpanLogs(t *testing.T) { } func TestSpanKindIsTranslatedToTag(t *testing.T) { - transform := transformerFromAttributes(pdata.NewMap()) + transform := transformerFromAttributes(pcommon.NewMap()) - internalSpan, err := transform.Span(spanWithKind(pdata.SpanKindInternal)) + internalSpan, err := transform.Span(spanWithKind(ptrace.SpanKindInternal)) require.NoError(t, err, "transforming span to wavefront format") kind, ok := internalSpan.Tags["span.kind"] assert.True(t, ok) assert.Equal(t, "internal", kind) - serverSpan, err := transform.Span(spanWithKind(pdata.SpanKindServer)) + serverSpan, err := transform.Span(spanWithKind(ptrace.SpanKindServer)) require.NoError(t, err, "transforming span to wavefront format") kind, ok = serverSpan.Tags["span.kind"] assert.True(t, ok) assert.Equal(t, "server", kind) - clientSpan, err := transform.Span(spanWithKind(pdata.SpanKindClient)) + clientSpan, err := transform.Span(spanWithKind(ptrace.SpanKindClient)) require.NoError(t, err, "transforming span to wavefront format") kind, ok = clientSpan.Tags["span.kind"] assert.True(t, ok) assert.Equal(t, "client", kind) - consumerSpan, err := transform.Span(spanWithKind(pdata.SpanKindConsumer)) + consumerSpan, err := transform.Span(spanWithKind(ptrace.SpanKindConsumer)) require.NoError(t, err, 
"transforming span to wavefront format") kind, ok = consumerSpan.Tags["span.kind"] assert.True(t, ok) assert.Equal(t, "consumer", kind) - producerSpan, err := transform.Span(spanWithKind(pdata.SpanKindProducer)) + producerSpan, err := transform.Span(spanWithKind(ptrace.SpanKindProducer)) require.NoError(t, err, "transforming span to wavefront format") kind, ok = producerSpan.Tags["span.kind"] assert.True(t, ok) assert.Equal(t, "producer", kind) - unspecifiedSpan, err := transform.Span(spanWithKind(pdata.SpanKindUnspecified)) + unspecifiedSpan, err := transform.Span(spanWithKind(ptrace.SpanKindUnspecified)) require.NoError(t, err, "transforming span to wavefront format") kind, ok = unspecifiedSpan.Tags["span.kind"] assert.True(t, ok) @@ -210,7 +211,7 @@ func TestSpanKindIsTranslatedToTag(t *testing.T) { } func TestTraceStateTranslatedToTag(t *testing.T) { - transform := transformerFromAttributes(pdata.NewMap()) + transform := transformerFromAttributes(pcommon.NewMap()) spanWithState, err := transform.Span(spanWithTraceState("key=val")) require.NoError(t, err, "transforming span to wavefront format") @@ -228,26 +229,26 @@ func TestSpanForSourceTag(t *testing.T) { inNanos := int64(50000000) //TestCase1: default value for source - resAttrs := pdata.NewMap() + resAttrs := pcommon.NewMap() transform := transformerFromAttributes(resAttrs) - span := pdata.NewSpan() - span.SetSpanID(pdata.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 1})) - span.SetTraceID(pdata.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) - span.SetStartTimestamp(pdata.Timestamp(inNanos)) + span := ptrace.NewSpan() + span.SetSpanID(pcommon.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 1})) + span.SetTraceID(pcommon.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) + span.SetStartTimestamp(pcommon.Timestamp(inNanos)) actual, err := transform.Span(span) require.NoError(t, err, "transforming span to wavefront format") assert.Equal(t, "", actual.Source) //TestCase2: source value from resAttrs.source - resAttrs = pdata.NewMap() + resAttrs = pcommon.NewMap() resAttrs.InsertString(labelSource, "test_source") resAttrs.InsertString(conventions.AttributeHostName, "test_host.name") transform = transformerFromAttributes(resAttrs) - span = pdata.NewSpan() - span.SetSpanID(pdata.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 1})) - span.SetTraceID(pdata.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) - span.SetStartTimestamp(pdata.Timestamp(inNanos)) + span = ptrace.NewSpan() + span.SetSpanID(pcommon.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 1})) + span.SetTraceID(pcommon.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) + span.SetStartTimestamp(pcommon.Timestamp(inNanos)) actual, err = transform.Span(span) require.NoError(t, err, "transforming span to wavefront format") @@ -259,14 +260,14 @@ func TestSpanForSourceTag(t *testing.T) { } //TestCase2: source value from resAttrs.host.name when source is not present - resAttrs = pdata.NewMap() + resAttrs = pcommon.NewMap() resAttrs.InsertString("hostname", "test_hostname") resAttrs.InsertString(conventions.AttributeHostName, "test_host.name") transform = transformerFromAttributes(resAttrs) - span = pdata.NewSpan() - span.SetSpanID(pdata.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 1})) - span.SetTraceID(pdata.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) - span.SetStartTimestamp(pdata.Timestamp(inNanos)) + span = ptrace.NewSpan() + span.SetSpanID(pcommon.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 1})) + 
span.SetTraceID(pcommon.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) + span.SetStartTimestamp(pcommon.Timestamp(inNanos)) actual, err = transform.Span(span) require.NoError(t, err, "transforming span to wavefront format") @@ -278,7 +279,7 @@ func TestSpanForSourceTag(t *testing.T) { } //TestCase4: source value from resAttrs.source when spanAttrs.source is present - resAttrs = pdata.NewMap() + resAttrs = pcommon.NewMap() span.Attributes().InsertString(labelSource, "source_from_span_attribute") resAttrs.InsertString(labelSource, "test_source") resAttrs.InsertString(conventions.AttributeHostName, "test_host.name") @@ -298,12 +299,12 @@ func TestSpanForDroppedCount(t *testing.T) { inNanos := int64(50000000) //TestCase: 1 count tags are not set - resAttrs := pdata.NewMap() + resAttrs := pcommon.NewMap() transform := transformerFromAttributes(resAttrs) - span := pdata.NewSpan() - span.SetSpanID(pdata.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 1})) - span.SetTraceID(pdata.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) - span.SetStartTimestamp(pdata.Timestamp(inNanos)) + span := ptrace.NewSpan() + span.SetSpanID(pcommon.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 1})) + span.SetTraceID(pcommon.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) + span.SetStartTimestamp(pcommon.Timestamp(inNanos)) actual, err := transform.Span(span) require.NoError(t, err, "transforming span to wavefront format") @@ -324,7 +325,7 @@ func TestSpanForDroppedCount(t *testing.T) { } func TestGetSourceAndResourceTags(t *testing.T) { - resAttrs := pdata.NewMap() + resAttrs := pcommon.NewMap() resAttrs.InsertString(labelSource, "test_source") resAttrs.InsertString(conventions.AttributeHostName, "test_host.name") @@ -337,7 +338,7 @@ func TestGetSourceAndResourceTags(t *testing.T) { } func TestGetSourceAndKey(t *testing.T) { - resAttrs := pdata.NewMap() + resAttrs := pcommon.NewMap() resAttrs.InsertString(labelSource, "some_source") resAttrs.InsertString(conventions.AttributeHostName, "test_host.name") @@ -347,7 +348,7 @@ func TestGetSourceAndKey(t *testing.T) { } func TestGetSourceAndKeyNotFound(t *testing.T) { - resAttrs := pdata.NewMap() + resAttrs := pcommon.NewMap() resAttrs.InsertString("foo", "some_source") resAttrs.InsertString("bar", "test_host.name") @@ -368,33 +369,33 @@ func TestAttributesToTagsReplaceSource(t *testing.T) { result) } -func spanWithKind(kind pdata.SpanKind) pdata.Span { - span := pdata.NewSpan() - span.SetSpanID(pdata.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 1})) - span.SetTraceID(pdata.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) +func spanWithKind(kind ptrace.SpanKind) ptrace.Span { + span := ptrace.NewSpan() + span.SetSpanID(pcommon.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 1})) + span.SetTraceID(pcommon.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) span.SetKind(kind) return span } -func spanWithTraceState(state pdata.TraceState) pdata.Span { - span := pdata.NewSpan() - span.SetSpanID(pdata.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 1})) - span.SetTraceID(pdata.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) +func spanWithTraceState(state ptrace.TraceState) ptrace.Span { + span := ptrace.NewSpan() + span.SetSpanID(pcommon.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 1})) + span.SetTraceID(pcommon.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) span.SetTraceState(state) return span } -func transformerFromAttributes(attrs pdata.Map) *traceTransformer { +func 
transformerFromAttributes(attrs pcommon.Map) *traceTransformer { return &traceTransformer{ resAttrs: attrs, } } -func spanWithStatus(statusCode pdata.StatusCode, message string) pdata.Span { - span := pdata.NewSpan() - span.SetSpanID(pdata.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 1})) - span.SetTraceID(pdata.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) - status := pdata.NewSpanStatus() +func spanWithStatus(statusCode ptrace.StatusCode, message string) ptrace.Span { + span := ptrace.NewSpan() + span.SetSpanID(pcommon.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 1})) + span.SetTraceID(pcommon.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1})) + status := ptrace.NewSpanStatus() status.SetCode(statusCode) if message != "" { status.SetMessage(message) diff --git a/exporter/tencentcloudlogserviceexporter/go.mod b/exporter/tencentcloudlogserviceexporter/go.mod index 37fdbb98b80a..e0593090fd7d 100644 --- a/exporter/tencentcloudlogserviceexporter/go.mod +++ b/exporter/tencentcloudlogserviceexporter/go.mod @@ -6,42 +6,40 @@ require ( github.com/pierrec/lz4 v2.6.1+incompatible github.com/stretchr/testify v1.7.1 github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.382 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d go.uber.org/zap v1.21.0 ) -require google.golang.org/protobuf v1.28.0 +require ( + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 + google.golang.org/protobuf v1.28.0 +) require ( - github.com/cenkalti/backoff/v4 v4.1.2 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/frankban/quicktest v1.14.0 // indirect github.com/fsnotify/fsnotify v1.5.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/protobuf v1.5.2 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/pelletier/go-toml v1.9.4 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect - golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect - golang.org/x/text v0.3.7 // indirect - google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect - google.golang.org/grpc v1.45.0 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal => ../../internal/coreinternal + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d 
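Note (not part of the patch): the wavefront test hunks above capture the core of this migration — the monolithic pdata package is split so that span construction moves to ptrace while IDs, timestamps, and attribute maps move to pcommon. Below is a minimal, self-contained sketch of that pattern using only calls that appear in the diff; the newTestSpan helper and the "source" attribute key are illustrative, not taken from the repository.

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/ptrace"
)

// newTestSpan mirrors the spanWithKind helper above: the span itself now comes
// from ptrace, while its span/trace IDs come from pcommon.
func newTestSpan(kind ptrace.SpanKind) ptrace.Span {
	span := ptrace.NewSpan()
	span.SetSpanID(pcommon.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 1}))
	span.SetTraceID(pcommon.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}))
	span.SetKind(kind)
	return span
}

func main() {
	// Attribute maps: pdata.NewMap() becomes pcommon.NewMap().
	attrs := pcommon.NewMap()
	attrs.InsertString("source", "test_source")

	// Span kinds: pdata.SpanKindConsumer becomes ptrace.SpanKindConsumer.
	span := newTestSpan(ptrace.SpanKindConsumer)
	fmt.Println(span.Kind(), attrs.Len())
}
```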
diff --git a/exporter/tencentcloudlogserviceexporter/go.sum b/exporter/tencentcloudlogserviceexporter/go.sum index 1674c7034c21..392522de47de 100644 --- a/exporter/tencentcloudlogserviceexporter/go.sum +++ b/exporter/tencentcloudlogserviceexporter/go.sum @@ -1,8 +1,5 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= @@ -18,19 +15,11 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= -github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -38,9 +27,6 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs 
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= @@ -50,7 +36,6 @@ github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09 github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= @@ -67,18 +52,15 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.1/go.mod 
h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -93,8 +75,6 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -124,8 +104,8 @@ github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqx github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= @@ -169,21 +149,16 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rhnvrm/simples3 v0.6.1/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 
h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= @@ -195,20 +170,21 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod 
h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -232,20 +208,16 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -261,22 +233,18 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
-golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -297,22 +265,16 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ 
-323,8 +285,6 @@ google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= @@ -334,8 +294,6 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= diff --git a/exporter/tencentcloudlogserviceexporter/logs_exporter.go b/exporter/tencentcloudlogserviceexporter/logs_exporter.go index 2cb378f403ca..78fca607e286 100644 --- a/exporter/tencentcloudlogserviceexporter/logs_exporter.go +++ b/exporter/tencentcloudlogserviceexporter/logs_exporter.go @@ -20,7 +20,7 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/exporter/exporterhelper" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/plog" "go.uber.org/zap" ) @@ -45,7 +45,7 @@ type logServiceLogsSender struct { func (s *logServiceLogsSender) pushLogsData( ctx context.Context, - md pdata.Logs) error { + md plog.Logs) error { var err error clsLogs := convertLogs(md) if len(clsLogs) > 0 { diff --git a/exporter/tencentcloudlogserviceexporter/logs_exporter_test.go b/exporter/tencentcloudlogserviceexporter/logs_exporter_test.go index d3696d03d5a0..6da8656b5c59 100644 --- a/exporter/tencentcloudlogserviceexporter/logs_exporter_test.go +++ b/exporter/tencentcloudlogserviceexporter/logs_exporter_test.go @@ -23,19 +23,20 @@ import ( "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/config" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" ) -func createSimpleLogData(numberOfLogs int) pdata.Logs { - logs := pdata.NewLogs() +func createSimpleLogData(numberOfLogs int) plog.Logs { + logs := plog.NewLogs() logs.ResourceLogs().AppendEmpty() // Add an empty ResourceLogs rl := logs.ResourceLogs().AppendEmpty() rl.ScopeLogs().AppendEmpty() // Add an empty ScopeLogs sl := rl.ScopeLogs().AppendEmpty() for i := 0; i < numberOfLogs; i++ { - ts := pdata.Timestamp(int64(i) 
* time.Millisecond.Nanoseconds()) + ts := pcommon.Timestamp(int64(i) * time.Millisecond.Nanoseconds()) logRecord := sl.LogRecords().AppendEmpty() logRecord.Body().SetStringVal("mylog") logRecord.Attributes().InsertString(conventions.AttributeServiceName, "myapp") diff --git a/exporter/tencentcloudlogserviceexporter/logsdata_to_logservice.go b/exporter/tencentcloudlogserviceexporter/logsdata_to_logservice.go index 7cedc08bde08..360b71c23ecd 100644 --- a/exporter/tencentcloudlogserviceexporter/logsdata_to_logservice.go +++ b/exporter/tencentcloudlogserviceexporter/logsdata_to_logservice.go @@ -19,8 +19,9 @@ import ( "strconv" "time" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" "google.golang.org/protobuf/proto" cls "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/tencentcloudlogserviceexporter/proto" @@ -44,7 +45,7 @@ const ( clsLogInstrumentationVersion = "otlp.version" ) -func convertLogs(ld pdata.Logs) []*cls.Log { +func convertLogs(ld plog.Logs) []*cls.Log { clsLogs := make([]*cls.Log, 0, ld.LogRecordCount()) rls := ld.ResourceLogs() @@ -69,7 +70,7 @@ func convertLogs(ld pdata.Logs) []*cls.Log { return clsLogs } -func resourceToLogContents(resource pdata.Resource) []*cls.Log_Content { +func resourceToLogContents(resource pcommon.Resource) []*cls.Log_Content { attrs := resource.Attributes() var hostname, serviceName string @@ -82,7 +83,7 @@ func resourceToLogContents(resource pdata.Resource) []*cls.Log_Content { } fields := map[string]interface{}{} - attrs.Range(func(k string, v pdata.Value) bool { + attrs.Range(func(k string, v pcommon.Value) bool { if k == conventions.AttributeServiceName || k == conventions.AttributeHostName { return true } @@ -110,7 +111,7 @@ func resourceToLogContents(resource pdata.Resource) []*cls.Log_Content { } } -func instrumentationLibraryToLogContents(instrumentationLibrary pdata.InstrumentationScope) []*cls.Log_Content { +func instrumentationLibraryToLogContents(instrumentationLibrary pcommon.InstrumentationScope) []*cls.Log_Content { return []*cls.Log_Content{ { Key: proto.String(clsLogInstrumentationName), @@ -123,10 +124,10 @@ func instrumentationLibraryToLogContents(instrumentationLibrary pdata.Instrument } } -func mapLogRecordToLogService(lr pdata.LogRecord, +func mapLogRecordToLogService(lr plog.LogRecord, resourceContents, instrumentationLibraryContents []*cls.Log_Content) *cls.Log { - if lr.Body().Type() == pdata.ValueTypeEmpty { + if lr.Body().Type() == pcommon.ValueTypeEmpty { return nil } var clsLog cls.Log @@ -136,7 +137,7 @@ func mapLogRecordToLogService(lr pdata.LogRecord, clsLog.Contents = make([]*cls.Log_Content, 0, preAllocCount+len(resourceContents)+len(instrumentationLibraryContents)) fields := map[string]interface{}{} - lr.Attributes().Range(func(k string, v pdata.Value) bool { + lr.Attributes().Range(func(k string, v pcommon.Value) bool { fields[k] = v.AsString() return true }) diff --git a/exporter/tencentcloudlogserviceexporter/logsdata_to_logservice_test.go b/exporter/tencentcloudlogserviceexporter/logsdata_to_logservice_test.go index 49a22d8dd617..0e99b0f238c9 100644 --- a/exporter/tencentcloudlogserviceexporter/logsdata_to_logservice_test.go +++ b/exporter/tencentcloudlogserviceexporter/logsdata_to_logservice_test.go @@ -23,8 +23,9 @@ import ( "time" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" conventions 
"go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" ) type logKeyValuePair struct { @@ -38,19 +39,19 @@ func (kv logKeyValuePairs) Len() int { return len(kv) } func (kv logKeyValuePairs) Swap(i, j int) { kv[i], kv[j] = kv[j], kv[i] } func (kv logKeyValuePairs) Less(i, j int) bool { return kv[i].Key < kv[j].Key } -func getComplexAttributeValueMap() pdata.Value { - mapVal := pdata.NewValueMap() +func getComplexAttributeValueMap() pcommon.Value { + mapVal := pcommon.NewValueMap() mapValReal := mapVal.MapVal() mapValReal.InsertBool("result", true) mapValReal.InsertString("status", "ok") mapValReal.InsertDouble("value", 1.3) mapValReal.InsertInt("code", 200) mapValReal.InsertNull("null") - arrayVal := pdata.NewValueSlice() + arrayVal := pcommon.NewValueSlice() arrayVal.SliceVal().AppendEmpty().SetStringVal("array") mapValReal.Insert("array", arrayVal) - subMapVal := pdata.NewValueMap() + subMapVal := pcommon.NewValueMap() subMapVal.MapVal().InsertString("data", "hello world") mapValReal.Insert("map", subMapVal) @@ -58,8 +59,8 @@ func getComplexAttributeValueMap() pdata.Value { return mapVal } -func createLogData(numberOfLogs int) pdata.Logs { - logs := pdata.NewLogs() +func createLogData(numberOfLogs int) plog.Logs { + logs := plog.NewLogs() logs.ResourceLogs().AppendEmpty() // Add an empty ResourceLogs rl := logs.ResourceLogs().AppendEmpty() rl.Resource().Attributes().InsertString("resouceKey", "resourceValue") @@ -70,7 +71,7 @@ func createLogData(numberOfLogs int) pdata.Logs { sl.Scope().SetVersion("v0.1.0") for i := 0; i < numberOfLogs; i++ { - ts := pdata.Timestamp(int64(i) * time.Millisecond.Nanoseconds()) + ts := pcommon.Timestamp(int64(i) * time.Millisecond.Nanoseconds()) logRecord := sl.LogRecords().AppendEmpty() switch i { case 0: @@ -88,7 +89,7 @@ func createLogData(numberOfLogs int) pdata.Logs { logRecord.Attributes().Insert("map-value", getComplexAttributeValueMap()) logRecord.Body().SetStringVal("log contents") case 6: - arrayVal := pdata.NewValueSlice() + arrayVal := pcommon.NewValueSlice() arrayVal.SliceVal().AppendEmpty().SetStringVal("array") logRecord.Attributes().Insert("array-value", arrayVal) logRecord.Body().SetStringVal("log contents") diff --git a/exporter/zipkinexporter/go.mod b/exporter/zipkinexporter/go.mod index 24301c96eee0..40b0092354b5 100644 --- a/exporter/zipkinexporter/go.mod +++ b/exporter/zipkinexporter/go.mod @@ -8,13 +8,13 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.48.0 github.com/openzipkin/zipkin-go v0.4.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 ) require ( github.com/apache/thrift v0.16.0 // indirect - github.com/cenkalti/backoff/v4 v4.1.2 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/census-instrumentation/opencensus-proto v0.3.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/felixge/httpsnoop v1.0.2 // indirect @@ -26,7 +26,7 @@ require ( github.com/golang/snappy v0.0.4 // indirect github.com/jaegertracing/jaeger v1.32.0 // indirect github.com/klauspost/compress v1.15.1 // indirect - github.com/knadh/koanf v1.4.0 // indirect + 
github.com/knadh/koanf v1.4.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect @@ -35,10 +35,10 @@ require ( github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rs/cors v1.8.2 // indirect - github.com/spf13/cast v1.4.1 // indirect github.com/uber/jaeger-client-go v2.30.0+incompatible // indirect github.com/uber/jaeger-lib v2.4.1+incompatible // indirect go.opencensus.io v0.23.0 // indirect + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect @@ -46,10 +46,8 @@ require ( go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect go.uber.org/zap v1.21.0 // indirect - golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect golang.org/x/text v0.3.7 // indirect - google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect google.golang.org/grpc v1.45.0 // indirect google.golang.org/protobuf v1.28.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect @@ -65,3 +63,5 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus => ../../pkg/translator/opencensus replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver => ../../receiver/zipkinreceiver + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/exporter/zipkinexporter/go.sum b/exporter/zipkinexporter/go.sum index e6984a93c3f8..e773231df32e 100644 --- a/exporter/zipkinexporter/go.sum +++ b/exporter/zipkinexporter/go.sum @@ -2,7 +2,6 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/HdrHistogram/hdrhistogram-go v1.0.1 h1:GX8GAYDuhlFQnI2fRDHQhTlkHMz8bEn0jTI6LJU0mpw= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/Shopify/sarama v1.30.0/go.mod h1:zujlQQx1kzHsh4jfV1USnptCQrHAEZ2Hk8fTKCulPVs= github.com/Shopify/toxiproxy/v2 v2.1.6-0.20210914104332-15ea381dcdae/go.mod h1:/cvHQkZ1fst0EmZnA5dFtiQdWCNCFYzb+uE2vqVgvx0= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= @@ -23,18 +22,16 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= -github.com/cenkalti/backoff/v4 v4.1.2/go.mod 
h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.3.0 h1:t/LhUZLVitR1Ow2YOnduCsavhwFUklBMoGVYUCqmCqk= github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= @@ -50,7 +47,6 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= @@ -109,7 +105,6 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= @@ -156,8 +151,8 @@ github.com/kisielk/gotool 
v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= @@ -221,9 +216,6 @@ github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFo github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -249,10 +241,12 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 
h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0/go.mod h1:PFmBsWbldL1kiWZk9+0LBZz2brhByaGsvp6pRICMlPE= @@ -263,7 +257,7 @@ go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1 go.opentelemetry.io/otel/metric v0.28.0/go.mod h1:TrzsfQAmQaB1PDcdhBauLMk7nyyg9hm+GoQq/ekE9Iw= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.0/go.mod h1:qs7BrU5cZ8dXQHBGxHMOxwME/27YH2qEp4/+tZLLwJE= go.opentelemetry.io/otel/trace v1.6.1/go.mod h1:RkFRM1m0puWIq10oxImnGEduNBzxiN7TXluRBtE+5j0= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= @@ -310,8 +304,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -342,8 +336,7 @@ golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= @@ -376,7 +369,6 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto 
v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= @@ -386,7 +378,6 @@ google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8 google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= diff --git a/exporter/zipkinexporter/zipkin.go b/exporter/zipkinexporter/zipkin.go index c57ad2032894..4bf40a56ec19 100644 --- a/exporter/zipkinexporter/zipkin.go +++ b/exporter/zipkinexporter/zipkin.go @@ -25,7 +25,7 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/confighttp" "go.opentelemetry.io/collector/consumer/consumererror" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/ptrace" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin/zipkinv2" ) @@ -74,7 +74,7 @@ func (ze *zipkinExporter) start(_ context.Context, host component.Host) (err err return } -func (ze *zipkinExporter) pushTraces(ctx context.Context, td pdata.Traces) error { +func (ze *zipkinExporter) pushTraces(ctx context.Context, td ptrace.Traces) error { spans, err := translator.FromTraces(td) if err != nil { return consumererror.NewPermanent(fmt.Errorf("failed to push trace data via Zipkin exporter: %w", err)) diff --git a/extension/asapauthextension/go.mod b/extension/asapauthextension/go.mod index 7f70c0892d38..da2a9ae5304d 100644 --- a/extension/asapauthextension/go.mod +++ b/extension/asapauthextension/go.mod @@ -6,7 +6,7 @@ require ( bitbucket.org/atlassian/go-asap/v2 v2.6.0 github.com/SermoDigital/jose v0.9.2-0.20161205224733-f6df55f235c2 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d go.uber.org/multierr v1.8.0 google.golang.org/grpc v1.45.0 ) @@ -17,7 +17,7 @@ require ( github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.2 // indirect github.com/google/uuid v1.3.0 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/pretty v0.3.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect @@ -26,9 +26,8 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/pquerna/cachecontrol v0.1.0 // indirect - github.com/spf13/cast v1.4.1 // indirect 
github.com/vincent-petithory/dataurl v1.0.0 // indirect - go.opentelemetry.io/collector/model v0.48.0 // indirect + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect @@ -41,3 +40,5 @@ require ( ) replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal => ../../internal/coreinternal + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/extension/asapauthextension/go.sum b/extension/asapauthextension/go.sum index 3af8623e8030..edc264a990de 100644 --- a/extension/asapauthextension/go.sum +++ b/extension/asapauthextension/go.sum @@ -21,7 +21,7 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= @@ -89,7 +89,6 @@ github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -120,8 +119,8 @@ github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqx github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= @@ 
-171,8 +170,6 @@ github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTE github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -189,10 +186,10 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= @@ -234,7 +231,7 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -260,7 +257,7 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= diff --git a/extension/awsproxy/go.mod b/extension/awsproxy/go.mod index 33bd91c6932d..9a730fdea2d9 100644 --- a/extension/awsproxy/go.mod +++ b/extension/awsproxy/go.mod @@ -6,7 +6,7 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/proxy v0.0.0-00010101000000-000000000000 github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.48.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d go.uber.org/zap v1.21.0 ) @@ -15,14 +15,13 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rogpeppe/go-internal v1.8.0 // indirect - github.com/spf13/cast v1.4.1 // indirect - go.opentelemetry.io/collector/model v0.48.0 // indirect + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect @@ -35,3 +34,5 @@ require ( replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/common => ../../internal/common replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/proxy => ./../../internal/aws/proxy + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/extension/awsproxy/go.sum b/extension/awsproxy/go.sum index aea83af44f88..b7622a340796 100644 --- a/extension/awsproxy/go.sum +++ b/extension/awsproxy/go.sum @@ -17,7 +17,7 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= 
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -48,7 +48,6 @@ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -80,8 +79,8 @@ github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqx github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -124,8 +123,6 @@ github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6po github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -138,10 +135,10 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod 
h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= @@ -180,8 +177,8 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -205,7 +202,7 @@ golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/extension/basicauthextension/go.mod b/extension/basicauthextension/go.mod index 4ca7c68fb34f..65c8c92a0c07 100644 --- a/extension/basicauthextension/go.mod +++ b/extension/basicauthextension/go.mod @@ -5,7 +5,7 @@ go 1.17 require ( github.com/stretchr/testify v1.7.1 github.com/tg123/go-htpasswd v1.2.0 - go.opentelemetry.io/collector v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d ) require ( @@ -14,7 +14,7 @@ require ( github.com/fsnotify/fsnotify v1.5.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.2 // indirect - github.com/knadh/koanf v1.4.0 // indirect + 
github.com/knadh/koanf v1.4.1 // indirect github.com/kr/pretty v0.3.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect @@ -22,8 +22,7 @@ require ( github.com/pelletier/go-toml v1.9.4 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spf13/cast v1.4.1 // indirect - go.opentelemetry.io/collector/model v0.48.0 // indirect + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect @@ -37,3 +36,5 @@ require ( gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/extension/basicauthextension/go.sum b/extension/basicauthextension/go.sum index 23f3adc5b068..49178af7b650 100644 --- a/extension/basicauthextension/go.sum +++ b/extension/basicauthextension/go.sum @@ -109,8 +109,8 @@ github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= @@ -156,8 +156,6 @@ github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBO github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -172,10 +170,10 @@ github.com/tg123/go-htpasswd v1.2.0/go.mod h1:h7IzlfpvIWnVJhNZ0nQ9HaFxHb7pn5uFJY github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -go.opentelemetry.io/collector v0.48.0 
h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= @@ -242,7 +240,7 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= diff --git a/extension/bearertokenauthextension/go.mod b/extension/bearertokenauthextension/go.mod index eadb39a93660..df1e86d0fb56 100644 --- a/extension/bearertokenauthextension/go.mod +++ b/extension/bearertokenauthextension/go.mod @@ -4,7 +4,7 @@ go 1.17 require ( github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d go.uber.org/zap v1.21.0 google.golang.org/grpc v1.45.0 ) @@ -14,7 +14,7 @@ require ( github.com/fsnotify/fsnotify v1.5.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.2 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/pretty v0.3.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect @@ -22,8 +22,7 @@ require ( github.com/pelletier/go-toml v1.9.4 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spf13/cast v1.4.1 // indirect - go.opentelemetry.io/collector/model v0.48.0 // indirect + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect @@ -34,3 +33,5 @@ require ( gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 
v3.0.0-20210107192922-496545a6307b // indirect ) + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/extension/bearertokenauthextension/go.sum b/extension/bearertokenauthextension/go.sum index 389208cbe043..6d64148c3604 100644 --- a/extension/bearertokenauthextension/go.sum +++ b/extension/bearertokenauthextension/go.sum @@ -17,7 +17,7 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= @@ -83,7 +83,6 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -114,8 +113,8 @@ github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqx github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= @@ -163,8 +162,6 @@ github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTE github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/spf13/cast 
v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -178,10 +175,10 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= @@ -223,7 +220,7 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -248,7 +245,7 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 
h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= diff --git a/extension/fluentbitextension/go.mod b/extension/fluentbitextension/go.mod index d3ead3b09864..e460986ef03d 100644 --- a/extension/fluentbitextension/go.mod +++ b/extension/fluentbitextension/go.mod @@ -7,7 +7,7 @@ require ( github.com/davecgh/go-spew v1.1.1 github.com/shirou/gopsutil/v3 v3.22.3 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d go.uber.org/zap v1.21.0 ) @@ -16,7 +16,7 @@ require ( github.com/fsnotify/fsnotify v1.5.1 // indirect github.com/go-ole/go-ole v1.2.6 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/pretty v0.3.0 // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect @@ -26,11 +26,10 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect - github.com/spf13/cast v1.4.1 // indirect github.com/tklauser/go-sysconf v0.3.10 // indirect github.com/tklauser/numcpus v0.4.0 // indirect github.com/yusufpapurcu/wmi v1.2.2 // indirect - go.opentelemetry.io/collector/model v0.48.0 // indirect + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect @@ -41,3 +40,5 @@ require ( gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/extension/fluentbitextension/go.sum b/extension/fluentbitextension/go.sum index fe22822cb04a..c09cc451f0ca 100644 --- a/extension/fluentbitextension/go.sum +++ b/extension/fluentbitextension/go.sum @@ -15,7 +15,7 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -50,7 +50,6 @@ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp 
v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -80,8 +79,8 @@ github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqx github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= @@ -133,8 +132,6 @@ github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFo github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/shirou/gopsutil/v3 v3.22.3 h1:UebRzEomgMpv61e3hgD1tGooqX5trFbdU/ehphbHd00= github.com/shirou/gopsutil/v3 v3.22.3/go.mod h1:D01hZJ4pVHPpCTZ3m3T2+wDF2YAGfd+H4ifUguaQzHM= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -153,10 +150,10 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1 github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg= github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata 
v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= @@ -195,7 +192,7 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= diff --git a/extension/healthcheckextension/go.mod b/extension/healthcheckextension/go.mod index b441d863ca70..89103ed10a3b 100644 --- a/extension/healthcheckextension/go.mod +++ b/extension/healthcheckextension/go.mod @@ -7,7 +7,7 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.48.0 github.com/stretchr/testify v1.7.1 go.opencensus.io v0.23.0 - go.opentelemetry.io/collector v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d go.uber.org/zap v1.21.0 ) @@ -15,13 +15,12 @@ require ( require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spf13/cast v1.4.1 // indirect - go.opentelemetry.io/collector/model v0.48.0 // indirect + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect @@ -32,3 +31,5 @@ require ( ) replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/common => ../../internal/common + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/extension/healthcheckextension/go.sum b/extension/healthcheckextension/go.sum index fcd3a10bc5bc..644ed7e702a4 100644 --- a/extension/healthcheckextension/go.sum +++ b/extension/healthcheckextension/go.sum @@ -15,7 +15,7 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= 
github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= @@ -68,7 +68,6 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -100,8 +99,8 @@ github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqx github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -142,8 +141,6 @@ github.com/rogpeppe/go-internal v1.6.2 h1:aIihoIOHCiLZHxyoNQ+ABL4NKhFTgKLBdMLyEA github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -158,10 +155,10 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod 
h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= @@ -201,7 +198,7 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -223,7 +220,7 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= diff --git a/extension/httpforwarder/go.mod b/extension/httpforwarder/go.mod index 29213508f56b..124be7842410 100644 --- a/extension/httpforwarder/go.mod +++ b/extension/httpforwarder/go.mod @@ -5,7 +5,7 @@ go 1.17 require ( 
github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.48.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d go.uber.org/zap v1.21.0 ) @@ -20,7 +20,7 @@ require ( github.com/golang/protobuf v1.5.2 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/klauspost/compress v1.15.1 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/pretty v0.3.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect @@ -29,17 +29,20 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rogpeppe/go-internal v1.8.0 // indirect github.com/rs/cors v1.8.2 // indirect - github.com/spf13/cast v1.4.1 // indirect - go.opentelemetry.io/collector/model v0.48.0 // indirect + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect + golang.org/x/text v0.3.7 // indirect google.golang.org/grpc v1.45.0 // indirect google.golang.org/protobuf v1.28.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/common => ../../internal/common + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/extension/httpforwarder/go.sum b/extension/httpforwarder/go.sum index 9567e59eb59e..d0ae8504b391 100644 --- a/extension/httpforwarder/go.sum +++ b/extension/httpforwarder/go.sum @@ -115,8 +115,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= @@ -164,8 +164,6 @@ github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= 
-github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -178,10 +176,10 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0/go.mod h1:PFmBsWbldL1kiWZk9+0LBZz2brhByaGsvp6pRICMlPE= go.opentelemetry.io/otel v1.6.0/go.mod h1:bfJD2DZVw0LBxghOTlgnlI0CV3hLDu9XF/QKOUXMTQQ= @@ -228,6 +226,8 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -252,11 +252,13 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= +golang.org/x/sys 
v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/extension/jaegerremotesampling/go.mod b/extension/jaegerremotesampling/go.mod index 62dd7efadd0e..728ae82a8a22 100644 --- a/extension/jaegerremotesampling/go.mod +++ b/extension/jaegerremotesampling/go.mod @@ -5,7 +5,7 @@ go 1.17 require ( github.com/jaegertracing/jaeger v1.32.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d go.uber.org/zap v1.21.0 google.golang.org/grpc v1.45.0 ) @@ -24,7 +24,7 @@ require ( github.com/golang/snappy v0.0.4 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/klauspost/compress v1.15.1 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/magiconair/properties v1.8.6 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect @@ -42,7 +42,7 @@ require ( github.com/subosito/gotenv v1.2.0 // indirect github.com/uber/jaeger-client-go v2.30.0+incompatible // indirect github.com/uber/jaeger-lib v2.4.1+incompatible // indirect - go.opentelemetry.io/collector/model v0.48.0 // indirect + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect @@ -50,7 +50,8 @@ require ( go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect - golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect + golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b // indirect golang.org/x/sys v0.0.0-20220209214540-3681064d5158 // indirect golang.org/x/text v0.3.7 // indirect google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf // indirect @@ -60,3 +61,5 @@ require ( gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/extension/jaegerremotesampling/go.sum b/extension/jaegerremotesampling/go.sum index c9cc65721abd..e35babc9e0a3 100644 --- a/extension/jaegerremotesampling/go.sum +++ b/extension/jaegerremotesampling/go.sum @@ -72,7 +72,7 @@ 
github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -201,7 +201,6 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -242,8 +241,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/klauspost/compress v1.14.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= @@ -347,10 +346,10 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d 
h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0/go.mod h1:SY9qHHUES6W3oZnO1H2W8NvsSovIoXRg/A1AH9px8+I= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= @@ -362,7 +361,7 @@ go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1 go.opentelemetry.io/otel/metric v0.28.0/go.mod h1:TrzsfQAmQaB1PDcdhBauLMk7nyyg9hm+GoQq/ekE9Iw= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.0/go.mod h1:qs7BrU5cZ8dXQHBGxHMOxwME/27YH2qEp4/+tZLLwJE= go.opentelemetry.io/otel/trace v1.6.1/go.mod h1:RkFRM1m0puWIq10oxImnGEduNBzxiN7TXluRBtE+5j0= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= @@ -455,8 +454,9 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -472,8 +472,9 @@ golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 h1:RerP+noqYHUQ8CMRcPlC2nvTa4dcBIjegkuWdcUDuqg= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b h1:clP8eMhB30EHdc0bd2Twtq6kgU7yl5ub2cQLSdrv1Dg= 
+golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= diff --git a/extension/oauth2clientauthextension/go.mod b/extension/oauth2clientauthextension/go.mod index 66f01882095e..2beb2d23e3ad 100644 --- a/extension/oauth2clientauthextension/go.mod +++ b/extension/oauth2clientauthextension/go.mod @@ -4,7 +4,7 @@ go 1.17 require ( github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d go.uber.org/zap v1.21.0 golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 google.golang.org/grpc v1.45.0 @@ -21,7 +21,7 @@ require ( github.com/golang/protobuf v1.5.2 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/klauspost/compress v1.15.1 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/pretty v0.3.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect @@ -30,14 +30,14 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rs/cors v1.8.2 // indirect - github.com/spf13/cast v1.4.1 // indirect - go.opentelemetry.io/collector/model v0.48.0 // indirect + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect - golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect + golang.org/x/text v0.3.7 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/protobuf v1.28.0 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect @@ -46,3 +46,5 @@ require ( ) require go.uber.org/multierr v1.8.0 + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/extension/oauth2clientauthextension/go.sum b/extension/oauth2clientauthextension/go.sum index 590e8f6dec20..c14f69e34a10 100644 --- a/extension/oauth2clientauthextension/go.sum +++ b/extension/oauth2clientauthextension/go.sum @@ -68,7 +68,7 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= 
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -192,7 +192,6 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -228,8 +227,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= @@ -280,8 +279,6 @@ github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -306,10 +303,10 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= 
-go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0/go.mod h1:PFmBsWbldL1kiWZk9+0LBZz2brhByaGsvp6pRICMlPE= @@ -410,8 +407,9 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= diff --git a/extension/observer/dockerobserver/go.mod b/extension/observer/dockerobserver/go.mod index 6679a133733d..68568f911c54 100644 --- a/extension/observer/dockerobserver/go.mod +++ b/extension/observer/dockerobserver/go.mod @@ -10,7 +10,7 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/containertest v0.48.0 github.com/open-telemetry/opentelemetry-collector-contrib/internal/docker v0.48.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d go.uber.org/zap v1.21.0 ) @@ -27,7 +27,7 @@ require ( github.com/gobwas/glob v0.2.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.2 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/pretty v0.3.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect @@ -38,14 +38,13 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/sirupsen/logrus v1.8.1 // indirect - github.com/spf13/cast v1.4.1 // indirect - go.opentelemetry.io/collector/model v0.48.0 // indirect + go.opentelemetry.io/collector/pdata 
v0.0.0-00010101000000-000000000000 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect + golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 // indirect google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect google.golang.org/grpc v1.45.0 // indirect google.golang.org/protobuf v1.28.0 // indirect @@ -63,3 +62,5 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/conta // see https://github.com/distribution/distribution/issues/3590 exclude github.com/docker/distribution v2.8.0+incompatible + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/extension/observer/dockerobserver/go.sum b/extension/observer/dockerobserver/go.sum index 83ff7d4c6faa..8af207f1fd80 100644 --- a/extension/observer/dockerobserver/go.sum +++ b/extension/observer/dockerobserver/go.sum @@ -455,8 +455,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -638,8 +638,6 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasO github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= @@ -705,10 +703,10 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= -go.opentelemetry.io/collector v0.48.0 
h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= @@ -812,7 +810,7 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -895,8 +893,8 @@ golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/extension/observer/ecsobserver/go.mod b/extension/observer/ecsobserver/go.mod index f09572fad485..0e0daea0ccdf 100644 --- a/extension/observer/ecsobserver/go.mod +++ b/extension/observer/ecsobserver/go.mod @@ -6,7 +6,7 @@ require ( github.com/aws/aws-sdk-go v1.43.37 
github.com/hashicorp/golang-lru v0.5.4 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d go.uber.org/multierr v1.8.0 go.uber.org/zap v1.21.0 gopkg.in/yaml.v2 v2.4.0 @@ -17,15 +17,14 @@ require ( github.com/fsnotify/fsnotify v1.5.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/pretty v0.3.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/pelletier/go-toml v1.9.4 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spf13/cast v1.4.1 // indirect - go.opentelemetry.io/collector/model v0.48.0 // indirect + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect @@ -33,3 +32,5 @@ require ( gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/extension/observer/ecsobserver/go.sum b/extension/observer/ecsobserver/go.sum index 3b8340e9f44a..0570c6ff518d 100644 --- a/extension/observer/ecsobserver/go.sum +++ b/extension/observer/ecsobserver/go.sum @@ -17,7 +17,7 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -50,7 +50,6 @@ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -84,8 +83,8 @@ github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqx 
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= @@ -131,8 +130,6 @@ github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTE github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -145,10 +142,10 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= @@ -187,8 +184,8 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -213,7 +210,7 @@ golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/extension/observer/ecstaskobserver/go.mod b/extension/observer/ecstaskobserver/go.mod index 5f801e9817b7..b1952018be7a 100644 --- a/extension/observer/ecstaskobserver/go.mod +++ b/extension/observer/ecstaskobserver/go.mod @@ -7,7 +7,7 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.48.0 github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.48.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d go.uber.org/zap v1.21.0 ) @@ -20,20 +20,21 @@ require ( github.com/golang/protobuf v1.5.2 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/klauspost/compress v1.15.1 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rs/cors v1.8.2 // indirect - github.com/spf13/cast v1.4.1 // indirect - go.opentelemetry.io/collector/model v0.48.0 // indirect + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect + 
golang.org/x/text v0.3.7 // indirect google.golang.org/grpc v1.45.0 // indirect google.golang.org/protobuf v1.28.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect @@ -47,3 +48,5 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/e replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/common => ../../../internal/common replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal => ../../../internal/coreinternal + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/extension/observer/ecstaskobserver/go.sum b/extension/observer/ecstaskobserver/go.sum index 7ea8a3635236..a94fb46fb657 100644 --- a/extension/observer/ecstaskobserver/go.sum +++ b/extension/observer/ecstaskobserver/go.sum @@ -17,7 +17,7 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= @@ -83,7 +83,6 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -115,8 +114,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pty v1.1.1/go.mod 
h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -159,8 +158,6 @@ github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -174,10 +171,10 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0/go.mod h1:PFmBsWbldL1kiWZk9+0LBZz2brhByaGsvp6pRICMlPE= @@ -225,7 +222,8 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -249,12 +247,13 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/extension/observer/go.mod b/extension/observer/go.mod index ea80037ddba4..15e73c7aa3c6 100644 --- a/extension/observer/go.mod +++ b/extension/observer/go.mod @@ -9,3 +9,5 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c // indirect ) + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/extension/observer/hostobserver/go.mod b/extension/observer/hostobserver/go.mod index 4903a174dff4..0c38e5a06d5f 100644 --- a/extension/observer/hostobserver/go.mod +++ b/extension/observer/hostobserver/go.mod @@ -6,7 +6,7 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer v0.48.0 github.com/shirou/gopsutil/v3 v3.22.3 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d go.uber.org/zap v1.21.0 ) @@ -16,7 +16,7 @@ require ( github.com/fsnotify/fsnotify v1.5.1 // indirect github.com/go-ole/go-ole v1.2.6 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/pretty v0.3.0 // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect @@ -26,11 +26,10 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect - github.com/spf13/cast v1.4.1 // indirect github.com/tklauser/go-sysconf v0.3.10 // indirect github.com/tklauser/numcpus v0.4.0 // indirect github.com/yusufpapurcu/wmi v1.2.2 // indirect - go.opentelemetry.io/collector/model v0.48.0 // indirect + 
go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect @@ -43,3 +42,5 @@ require ( ) replace github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer => ../ + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/extension/observer/hostobserver/go.sum b/extension/observer/hostobserver/go.sum index fe22822cb04a..c09cc451f0ca 100644 --- a/extension/observer/hostobserver/go.sum +++ b/extension/observer/hostobserver/go.sum @@ -15,7 +15,7 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -50,7 +50,6 @@ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -80,8 +79,8 @@ github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqx github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= @@ -133,8 +132,6 @@ github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFo github.com/ryanuber/go-glob v1.0.0/go.mod 
h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/shirou/gopsutil/v3 v3.22.3 h1:UebRzEomgMpv61e3hgD1tGooqX5trFbdU/ehphbHd00= github.com/shirou/gopsutil/v3 v3.22.3/go.mod h1:D01hZJ4pVHPpCTZ3m3T2+wDF2YAGfd+H4ifUguaQzHM= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -153,10 +150,10 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1 github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg= github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= @@ -195,7 +192,7 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= diff --git a/extension/observer/k8sobserver/go.mod b/extension/observer/k8sobserver/go.mod index f1d90df72640..0b31cd6d5e30 100644 --- a/extension/observer/k8sobserver/go.mod +++ b/extension/observer/k8sobserver/go.mod @@ -6,7 +6,7 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer v0.48.0 
github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.48.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d go.uber.org/zap v1.21.0 k8s.io/api v0.23.5 k8s.io/apimachinery v0.23.5 @@ -24,7 +24,7 @@ require ( github.com/googleapis/gnostic v0.5.5 // indirect github.com/imdario/mergo v0.3.11 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/pretty v0.3.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect @@ -35,18 +35,17 @@ require ( github.com/openshift/client-go v0.0.0-20210521082421-73d9475a9142 // indirect github.com/pelletier/go-toml v1.9.4 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spf13/cast v1.4.1 // indirect github.com/spf13/pflag v1.0.5 // indirect - go.opentelemetry.io/collector/model v0.48.0 // indirect + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect - golang.org/x/net v0.0.0-20211209124913-491a49abca63 // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect - golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b // indirect + golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 // indirect + golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect golang.org/x/text v0.3.7 // indirect golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect google.golang.org/appengine v1.6.7 // indirect @@ -66,3 +65,5 @@ require ( replace github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer => ../ replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig => ../../../internal/k8sconfig + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/extension/observer/k8sobserver/go.sum b/extension/observer/k8sobserver/go.sum index bf4831591d78..86756771b77f 100644 --- a/extension/observer/k8sobserver/go.sum +++ b/extension/observer/k8sobserver/go.sum @@ -67,7 +67,7 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline 
v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= @@ -206,7 +206,6 @@ github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3i github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw= github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= @@ -251,8 +250,8 @@ github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/X github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= @@ -336,8 +335,6 @@ github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= @@ -365,10 +362,10 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= 
+go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= @@ -470,8 +467,9 @@ golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20211209124913-491a49abca63 h1:iocB37TsdFuN6IBRZ+ry36wrkoV51/tl5vOWqkcPGvY= golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -549,13 +547,14 @@ golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b h1:9zKuko04nR4gjZ4+DNjHqRlAJqbJETHwiNKDqTfOjfE= golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod 
h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/extension/oidcauthextension/go.mod b/extension/oidcauthextension/go.mod index 36489c27538d..a7528ffcedd9 100644 --- a/extension/oidcauthextension/go.mod +++ b/extension/oidcauthextension/go.mod @@ -5,7 +5,7 @@ go 1.17 require ( github.com/coreos/go-oidc v2.2.1+incompatible github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d go.uber.org/zap v1.21.0 ) @@ -14,7 +14,7 @@ require ( github.com/fsnotify/fsnotify v1.5.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.2 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/pretty v0.3.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect @@ -23,15 +23,14 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/pquerna/cachecontrol v0.1.0 // indirect - github.com/spf13/cast v1.4.1 // indirect - go.opentelemetry.io/collector/model v0.48.0 // indirect + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 // indirect - golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/grpc v1.45.0 // indirect @@ -41,3 +40,5 @@ require ( gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/extension/oidcauthextension/go.sum b/extension/oidcauthextension/go.sum index 29754b9dd2fe..9e5ac4962f26 100644 --- a/extension/oidcauthextension/go.sum +++ b/extension/oidcauthextension/go.sum @@ -177,8 +177,8 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1 github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod 
h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= @@ -227,8 +227,6 @@ github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBO github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -250,10 +248,10 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= @@ -337,8 +335,8 @@ golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 
v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -390,7 +388,7 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/extension/pprofextension/go.mod b/extension/pprofextension/go.mod index 84a512c50475..69634c5bcaac 100644 --- a/extension/pprofextension/go.mod +++ b/extension/pprofextension/go.mod @@ -5,7 +5,7 @@ go 1.17 require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.48.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d go.uber.org/zap v1.21.0 ) @@ -13,7 +13,7 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/fsnotify/fsnotify v1.5.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/pretty v0.3.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect @@ -21,8 +21,7 @@ require ( github.com/pelletier/go-toml v1.9.4 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rogpeppe/go-internal v1.8.0 // indirect - github.com/spf13/cast v1.4.1 // indirect - go.opentelemetry.io/collector/model v0.48.0 // indirect + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect @@ -33,3 +32,5 @@ require ( ) replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/common => ../../internal/common + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/extension/pprofextension/go.sum b/extension/pprofextension/go.sum index 5632c6899af1..efcf73b28ca4 100644 --- a/extension/pprofextension/go.sum +++ b/extension/pprofextension/go.sum @@ -15,7 +15,7 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 
h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -48,7 +48,6 @@ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -78,8 +77,8 @@ github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqx github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= @@ -125,8 +124,6 @@ github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6po github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -139,10 +136,10 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= 
-go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= @@ -181,7 +178,7 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -204,7 +201,7 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= diff --git a/extension/sigv4authextension/go.mod b/extension/sigv4authextension/go.mod index 0dc7d5835c71..b14ecf651659 100644 --- a/extension/sigv4authextension/go.mod +++ b/extension/sigv4authextension/go.mod @@ -8,7 +8,7 @@ require ( github.com/aws/aws-sdk-go-v2/credentials v1.4.3 github.com/aws/aws-sdk-go-v2/service/sts v1.7.2 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d go.uber.org/zap v1.21.0 google.golang.org/grpc v1.45.0 ) @@ -22,21 +22,21 
@@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.2 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spf13/cast v1.4.1 // indirect - go.opentelemetry.io/collector/model v0.48.0 // indirect + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect - golang.org/x/net v0.0.0-20211216030914-fe4d6282115f // indirect google.golang.org/protobuf v1.28.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/extension/sigv4authextension/go.sum b/extension/sigv4authextension/go.sum index cb75f38fec9c..030ef195fea1 100644 --- a/extension/sigv4authextension/go.sum +++ b/extension/sigv4authextension/go.sum @@ -26,7 +26,7 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= @@ -90,7 +90,6 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -121,8 +120,8 @@ github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqx github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= 
github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -164,8 +163,6 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -179,10 +176,10 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= @@ -224,8 +221,7 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod 
h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20211216030914-fe4d6282115f h1:hEYJvxw1lSnWIl8X9ofsYMklzaDs90JI2az5YMd4fPM= -golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -249,7 +245,7 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= diff --git a/extension/storage/go.mod b/extension/storage/go.mod index b51ca1fb6ff7..fb637fceadde 100644 --- a/extension/storage/go.mod +++ b/extension/storage/go.mod @@ -5,7 +5,7 @@ go 1.17 require ( github.com/stretchr/testify v1.7.1 go.etcd.io/bbolt v1.3.6 - go.opentelemetry.io/collector v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d go.uber.org/zap v1.21.0 ) @@ -27,7 +27,7 @@ require ( github.com/jackc/pgproto3/v2 v2.2.0 // indirect github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b // indirect github.com/jackc/pgtype v1.10.0 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/pretty v0.3.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect @@ -35,17 +35,18 @@ require ( github.com/pelletier/go-toml v1.9.4 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spf13/cast v1.4.1 // indirect - go.opentelemetry.io/collector/model v0.48.0 // indirect + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect + golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 // indirect golang.org/x/text v0.3.7 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) + +replace 
go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/extension/storage/go.sum b/extension/storage/go.sum index 08233e85b4f5..3b4f357c3143 100644 --- a/extension/storage/go.sum +++ b/extension/storage/go.sum @@ -18,7 +18,7 @@ github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZx github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= @@ -62,7 +62,6 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -139,8 +138,8 @@ github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqx github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -211,8 +210,6 @@ github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXY github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast 
v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -232,10 +229,10 @@ github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxt go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= @@ -297,7 +294,7 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -329,8 +326,8 @@ golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= 
+golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/go.mod b/go.mod index 9fc296f589eb..da097b8bb1c0 100644 --- a/go.mod +++ b/go.mod @@ -126,7 +126,7 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zookeeperreceiver v0.48.0 github.com/prometheus/prometheus v1.8.2-0.20220324155304-4d8bbfd4164c github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886 ) @@ -141,7 +141,7 @@ require ( code.cloudfoundry.org/go-diodes v0.0.0-20211115184647-b584dd5df32c // indirect code.cloudfoundry.org/go-loggregator v7.4.0+incompatible // indirect code.cloudfoundry.org/rfc5424 v0.0.0-20201103192249-000122071b78 // indirect - contrib.go.opencensus.io/exporter/prometheus v0.4.0 // indirect + contrib.go.opencensus.io/exporter/prometheus v0.4.1 // indirect contrib.go.opencensus.io/exporter/stackdriver v0.13.11 // indirect github.com/Azure/azure-sdk-for-go v62.0.0+incompatible // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect @@ -306,7 +306,7 @@ require ( github.com/karrick/godirwalk v1.16.1 // indirect github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect github.com/klauspost/compress v1.15.1 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b // indirect github.com/leoluk/perflib_exporter v0.1.0 // indirect github.com/lib/pq v1.10.5 // indirect @@ -439,7 +439,8 @@ require ( go.etcd.io/bbolt v1.3.6 // indirect go.mongodb.org/atlas v0.15.0 // indirect go.opencensus.io v0.23.0 // indirect - go.opentelemetry.io/collector/model v0.48.0 // indirect + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d // indirect + go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 // indirect go.opentelemetry.io/contrib/zpages v0.31.0 // indirect @@ -808,3 +809,5 @@ retract v0.37.0 // Contains dependencies on v0.36.0 components, which should hav // see https://github.com/distribution/distribution/issues/3590 exclude github.com/docker/distribution v2.8.0+incompatible + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/go.sum b/go.sum index 5479e014a677..0f125178dfd6 100644 --- a/go.sum +++ b/go.sum @@ -76,8 +76,8 @@ code.cloudfoundry.org/go-loggregator v7.4.0+incompatible h1:KqZYloMQWM5Zg/BQKunO code.cloudfoundry.org/go-loggregator v7.4.0+incompatible/go.mod h1:KPBTRqj+y738Nhf1+g4JHFaBU8j7dedirR5ETNHvMXU= code.cloudfoundry.org/rfc5424 v0.0.0-20201103192249-000122071b78 h1:mrZQaZmuDIPhSp6b96b+CRKC2uH44ifa5cjDV2epKis= code.cloudfoundry.org/rfc5424 v0.0.0-20201103192249-000122071b78/go.mod h1:tkZo8GtzBjySJ7USvxm4E36lNQw1D3xM6oKHGqdaAJ4= 
-contrib.go.opencensus.io/exporter/prometheus v0.4.0 h1:0QfIkj9z/iVZgK31D9H9ohjjIDApI2GOPScCKwxedbs= -contrib.go.opencensus.io/exporter/prometheus v0.4.0/go.mod h1:o7cosnyfuPVK0tB8q0QmaQNhGnptITnPQB+z1+qeFB0= +contrib.go.opencensus.io/exporter/prometheus v0.4.1 h1:oObVeKo2NxpdF/fIfrPsNj6K0Prg0R0mHM+uANlYMiM= +contrib.go.opencensus.io/exporter/prometheus v0.4.1/go.mod h1:t9wvfitlUjGXG2IXAZsuFq26mDGid/JwCEXp+gTG/9U= contrib.go.opencensus.io/exporter/stackdriver v0.13.11 h1:YzmWJ2OT2K3ouXyMm5FmFQPoDs5TfLjx6Xn5x5CLN0I= contrib.go.opencensus.io/exporter/stackdriver v0.13.11/go.mod h1:I5htMbyta491eUxufwwZPQdcKvvgzMB4O9ni41YnIM8= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= @@ -1409,8 +1409,8 @@ github.com/klauspost/compress v1.14.4/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47e github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/crc32 v1.2.0/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b h1:iNjcivnc6lhbvJA3LD622NPrUponluJrBWPIwGG/3Bg= github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -1786,6 +1786,7 @@ github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeD github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= @@ -1895,7 +1896,6 @@ github.com/shirou/gopsutil v2.20.9+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu github.com/shirou/gopsutil v3.21.10+incompatible h1:AL2kpVykjkqeN+MFe1WcwSBVUjGjvdU8/ubvCuXAjrU= github.com/shirou/gopsutil v3.21.10+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/gopsutil/v3 v3.21.6/go.mod h1:JfVbDpIBLVzT8oKbvMg9P3wEIMDDpVn+LwHTKj0ST88= -github.com/shirou/gopsutil/v3 v3.22.2/go.mod h1:WapW1AOOPlHyXr+yOyw3uYx36enocrtSoSBy0L5vUHY= github.com/shirou/gopsutil/v3 v3.22.3 h1:UebRzEomgMpv61e3hgD1tGooqX5trFbdU/ehphbHd00= github.com/shirou/gopsutil/v3 v3.22.3/go.mod h1:D01hZJ4pVHPpCTZ3m3T2+wDF2YAGfd+H4ifUguaQzHM= github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod 
h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= @@ -2053,11 +2053,9 @@ github.com/tinylib/msgp v1.1.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDW github.com/tinylib/msgp v1.1.6 h1:i+SbKraHhnrf9M5MYmvQhFnbLhAXSDWF8WWsuyRdocw= github.com/tinylib/msgp v1.1.6/go.mod h1:75BAfg2hauQhs3qedfdDZmWAPcFMAvJE5b9rGOMufyw= github.com/tklauser/go-sysconf v0.3.6/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI= -github.com/tklauser/go-sysconf v0.3.9/go.mod h1:11DU/5sG7UexIrp/O6g35hrWzu0JxlwQ3LSFUzyeuhs= github.com/tklauser/go-sysconf v0.3.10 h1:IJ1AZGZRWbY8T5Vfk04D9WOA5WSejdflXxP03OUqALw= github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk= github.com/tklauser/numcpus v0.2.2/go.mod h1:x3qojaO3uyYt0i56EW/VUYs7uBvdl2fkfZFu0T9wgjM= -github.com/tklauser/numcpus v0.3.0/go.mod h1:yFGUr7TUHQRAhyqBcEg0Ge34zDBAsIvJJcyE6boqnA8= github.com/tklauser/numcpus v0.4.0 h1:E53Dm1HjH1/R2/aoCtXtPgzmElmn51aOkhCFSuZq//o= github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -2204,13 +2202,16 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= go.opentelemetry.io/collector/model v0.44.0/go.mod h1:4jo1R8uBDspLCxUGhQ0k3v/EFXFbW7s0AIy3LuGLbcU= go.opentelemetry.io/collector/model v0.45.0/go.mod h1:uyiyyq8lV45zrJ94MnLip26sorfNLP6J9XmOvaEmy7w= go.opentelemetry.io/collector/model v0.46.0/go.mod h1:uyiyyq8lV45zrJ94MnLip26sorfNLP6J9XmOvaEmy7w= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.28.0/go.mod h1:vEhqr0m4eTc+DWxfsXoXue2GBgV2uUwVznkGIHW/e5w= @@ -2246,7 +2247,6 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.3.0/go.mod h1 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc 
v1.4.1/go.mod h1:c6E4V3/U+miqjs/8l950wggHGL1qzlp0Ypj9xoGrPqo= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.3.0/go.mod h1:QNX1aly8ehqqX1LEa6YniTU7VY9I6R3X/oPxhGdTceE= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.4.1/go.mod h1:VwYo0Hak6Efuy0TXsZs8o1hnV3dHDPNtDbycG0hI8+M= -go.opentelemetry.io/otel/exporters/prometheus v0.28.0/go.mod h1:nN2uGmk/rLmcbPTaZakIMqYH2Q0T8V1sOnKOHe/HLH0= go.opentelemetry.io/otel/exporters/prometheus v0.29.0 h1:jOrFr8pCPj52GCPNq3qd69SEug3QmqDJTzbrefUxkpw= go.opentelemetry.io/otel/exporters/prometheus v0.29.0/go.mod h1:Er2VVJQZbHysogooLNchdZ3MLYoI7+d15mHmrRlRJCU= go.opentelemetry.io/otel/internal/metric v0.23.0/go.mod h1:z+RPiDJe30YnCrOhFGivwBS+DU1JU/PiLKkk4re2DNY= @@ -2264,13 +2264,11 @@ go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m go.opentelemetry.io/otel/sdk v1.0.1/go.mod h1:HrdXne+BiwsOHYYkBE5ysIcv2bvdZstxzmCQhxTcZkI= go.opentelemetry.io/otel/sdk v1.3.0/go.mod h1:rIo4suHNhQwBIPg9axF8V9CA72Wz2mKF1teNrup8yzs= go.opentelemetry.io/otel/sdk v1.4.1/go.mod h1:NBwHDgDIBYjwK2WNu1OPgsIc2IJzmBXNnvIJxJc8BpE= -go.opentelemetry.io/otel/sdk v1.6.0/go.mod h1:PjLRUfDsoPy0zl7yrDGSUqjj43tL7rEtFdCEiGlxXRM= go.opentelemetry.io/otel/sdk v1.6.1/go.mod h1:IVYrddmFZ+eJqu2k38qD3WezFR2pymCzm8tdxyh3R4E= go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/sdk v1.6.3/go.mod h1:A4iWF7HTXa+GWL/AaqESz28VuSBIcZ+0CV+IzJ5NMiQ= go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE= go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE= -go.opentelemetry.io/otel/sdk/metric v0.28.0/go.mod h1:DqJmT0ovBgoW6TJ8CAQyTnwxZPIp3KWtCiDDZ1uHAzU= go.opentelemetry.io/otel/sdk/metric v0.29.0 h1:OCEp2igPFXQrGxSR/nwd/bDjkPlPlOVjIULA/ob0dNw= go.opentelemetry.io/otel/sdk/metric v0.29.0/go.mod h1:IFkFNKI8Gq8zBdqOKdODCL9+LInBZLXaGpqSIKphNuU= go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= @@ -2652,7 +2650,6 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= diff --git a/internal/aws/awsutil/go.mod b/internal/aws/awsutil/go.mod index dcb35320bf32..90bb7ad1c3aa 100644 --- a/internal/aws/awsutil/go.mod +++ b/internal/aws/awsutil/go.mod @@ -21,3 +21,5 @@ require ( gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/internal/aws/containerinsight/go.mod b/internal/aws/containerinsight/go.mod index b2e1db30c809..9fb3ae25a060 100644 --- a/internal/aws/containerinsight/go.mod +++ 
b/internal/aws/containerinsight/go.mod @@ -4,7 +4,7 @@ go 1.17 require ( github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/zap v1.21.0 ) @@ -19,3 +19,5 @@ require ( gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/internal/aws/containerinsight/go.sum b/internal/aws/containerinsight/go.sum index d4baecbf471d..ed6677d10b4f 100644 --- a/internal/aws/containerinsight/go.sum +++ b/internal/aws/containerinsight/go.sum @@ -26,8 +26,8 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.8.0 h1:CUhrE4N1rqSE6FM9ecihEjRkLQu8cDfgDyoOs83mEY4= go.uber.org/atomic v1.8.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= diff --git a/internal/aws/containerinsight/utils.go b/internal/aws/containerinsight/utils.go index 8b5880e70df1..a4dcbe7c7faf 100644 --- a/internal/aws/containerinsight/utils.go +++ b/internal/aws/containerinsight/utils.go @@ -20,7 +20,8 @@ import ( "strings" "time" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" ) @@ -165,17 +166,17 @@ func GetUnitForMetric(metric string) string { } // ConvertToOTLPMetrics converts a field containing metric values and a tag containing the relevant labels to OTLP metrics -func ConvertToOTLPMetrics(fields map[string]interface{}, tags map[string]string, logger *zap.Logger) pdata.Metrics { - md := pdata.NewMetrics() +func ConvertToOTLPMetrics(fields map[string]interface{}, tags map[string]string, logger *zap.Logger) pmetric.Metrics { + md := pmetric.NewMetrics() rms := md.ResourceMetrics() rm := rms.AppendEmpty() - var timestamp pdata.Timestamp + var timestamp pcommon.Timestamp resource := rm.Resource() for tagKey, tagValue := range tags { if tagKey == Timestamp { timeNs, _ := strconv.ParseUint(tagValue, 10, 64) - timestamp = pdata.Timestamp(timeNs) + timestamp = pcommon.Timestamp(timeNs) // convert from nanosecond to millisecond (as emf log use millisecond timestamp) tagValue = strconv.FormatUint(timeNs/uint64(time.Millisecond), 10) } @@ -214,10 +215,10 @@ func ConvertToOTLPMetrics(fields map[string]interface{}, tags map[string]string, return md } -func intGauge(ilm pdata.ScopeMetrics, metricName string, unit string, value int64, ts pdata.Timestamp) { +func intGauge(ilm pmetric.ScopeMetrics, metricName string, unit string, value int64, ts pcommon.Timestamp) { metric := initMetric(ilm, 
metricName, unit) - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) intGauge := metric.Gauge() dataPoints := intGauge.DataPoints() dataPoint := dataPoints.AppendEmpty() @@ -226,10 +227,10 @@ func intGauge(ilm pdata.ScopeMetrics, metricName string, unit string, value int6 dataPoint.SetTimestamp(ts) } -func doubleGauge(ilm pdata.ScopeMetrics, metricName string, unit string, value float64, ts pdata.Timestamp) { +func doubleGauge(ilm pmetric.ScopeMetrics, metricName string, unit string, value float64, ts pcommon.Timestamp) { metric := initMetric(ilm, metricName, unit) - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) doubleGauge := metric.Gauge() dataPoints := doubleGauge.DataPoints() dataPoint := dataPoints.AppendEmpty() @@ -238,7 +239,7 @@ func doubleGauge(ilm pdata.ScopeMetrics, metricName string, unit string, value f dataPoint.SetTimestamp(ts) } -func initMetric(ilm pdata.ScopeMetrics, name, unit string) pdata.Metric { +func initMetric(ilm pmetric.ScopeMetrics, name, unit string) pmetric.Metric { metric := ilm.Metrics().AppendEmpty() metric.SetName(name) metric.SetUnit(unit) diff --git a/internal/aws/containerinsight/utils_test.go b/internal/aws/containerinsight/utils_test.go index 018309e0f343..9aad6d963a8e 100644 --- a/internal/aws/containerinsight/utils_test.go +++ b/internal/aws/containerinsight/utils_test.go @@ -21,7 +21,8 @@ import ( "time" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" ) @@ -141,7 +142,7 @@ func convertToFloat64(value interface{}) float64 { return -1.0 } -func checkMetricsAreExpected(t *testing.T, md pdata.Metrics, fields map[string]interface{}, tags map[string]string, +func checkMetricsAreExpected(t *testing.T, md pmetric.Metrics, fields map[string]interface{}, tags map[string]string, expectedUnits map[string]string) { rms := md.ResourceMetrics() @@ -175,17 +176,17 @@ func checkMetricsAreExpected(t *testing.T, md pdata.Metrics, fields map[string]i assert.Equal(t, expectedUnits[metricName], m.Unit(), "Wrong unit for metric: "+metricName) switch m.DataType() { //we only need to worry about gauge types for container insights metrics - case pdata.MetricDataTypeGauge: + case pmetric.MetricDataTypeGauge: dps := m.Gauge().DataPoints() assert.Equal(t, 1, dps.Len()) dp := dps.At(0) switch dp.ValueType() { - case pdata.MetricValueTypeDouble: + case pmetric.MetricValueTypeDouble: assert.Equal(t, convertToFloat64(fields[metricName]), dp.DoubleVal()) - case pdata.MetricValueTypeInt: + case pmetric.MetricValueTypeInt: assert.Equal(t, convertToInt64(fields[metricName]), dp.IntVal()) } - assert.Equal(t, pdata.Timestamp(timeUnixNano), dp.Timestamp()) + assert.Equal(t, pcommon.Timestamp(timeUnixNano), dp.Timestamp()) } } } @@ -194,7 +195,7 @@ func checkMetricsAreExpected(t *testing.T, md pdata.Metrics, fields map[string]i func TestConvertToOTLPMetricsForInvalidMetrics(t *testing.T) { var fields map[string]interface{} var tags map[string]string - var md pdata.Metrics + var md pmetric.Metrics now := time.Now() timestamp := strconv.FormatInt(now.UnixNano(), 10) @@ -223,7 +224,7 @@ func TestConvertToOTLPMetricsForClusterMetrics(t *testing.T) { var fields map[string]interface{} var expectedUnits map[string]string var tags map[string]string - var md pdata.Metrics + var md pmetric.Metrics now := time.Now() timestamp := 
strconv.FormatInt(now.UnixNano(), 10) @@ -283,7 +284,7 @@ func TestConvertToOTLPMetricsForContainerMetrics(t *testing.T) { var fields map[string]interface{} var expectedUnits map[string]string var tags map[string]string - var md pdata.Metrics + var md pmetric.Metrics now := time.Now() timestamp := strconv.FormatInt(now.UnixNano(), 10) @@ -386,7 +387,7 @@ func TestConvertToOTLPMetricsForNodeMetrics(t *testing.T) { var fields map[string]interface{} var expectedUnits map[string]string var tags map[string]string - var md pdata.Metrics + var md pmetric.Metrics now := time.Now() timestamp := strconv.FormatInt(now.UnixNano(), 10) @@ -481,7 +482,7 @@ func TestConvertToOTLPMetricsForNodeDiskIOMetrics(t *testing.T) { var fields map[string]interface{} var expectedUnits map[string]string var tags map[string]string - var md pdata.Metrics + var md pmetric.Metrics now := time.Now() timestamp := strconv.FormatInt(now.UnixNano(), 10) @@ -530,7 +531,7 @@ func TestConvertToOTLPMetricsForNodeFSMetrics(t *testing.T) { var fields map[string]interface{} var expectedUnits map[string]string var tags map[string]string - var md pdata.Metrics + var md pmetric.Metrics now := time.Now() timestamp := strconv.FormatInt(now.UnixNano(), 10) @@ -572,7 +573,7 @@ func TestConvertToOTLPMetricsForNodeNetMetrics(t *testing.T) { var fields map[string]interface{} var expectedUnits map[string]string var tags map[string]string - var md pdata.Metrics + var md pmetric.Metrics now := time.Now() timestamp := strconv.FormatInt(now.UnixNano(), 10) @@ -618,7 +619,7 @@ func TestConvertToOTLPMetricsForPodMetrics(t *testing.T) { var fields map[string]interface{} var expectedUnits map[string]string var tags map[string]string - var md pdata.Metrics + var md pmetric.Metrics now := time.Now() timestamp := strconv.FormatInt(now.UnixNano(), 10) @@ -719,7 +720,7 @@ func TestConvertToOTLPMetricsForPodNetMetrics(t *testing.T) { var fields map[string]interface{} var expectedUnits map[string]string var tags map[string]string - var md pdata.Metrics + var md pmetric.Metrics now := time.Now() timestamp := strconv.FormatInt(now.UnixNano(), 10) diff --git a/internal/aws/cwlogs/go.mod b/internal/aws/cwlogs/go.mod index 9bf173ead2b3..5cd20cdaea80 100644 --- a/internal/aws/cwlogs/go.mod +++ b/internal/aws/cwlogs/go.mod @@ -5,7 +5,7 @@ go 1.17 require ( github.com/aws/aws-sdk-go v1.43.37 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d go.uber.org/zap v1.21.0 ) @@ -14,16 +14,15 @@ require ( github.com/fsnotify/fsnotify v1.5.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/pretty v0.3.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/pelletier/go-toml v1.9.4 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spf13/cast v1.4.1 // indirect github.com/stretchr/objx v0.1.1 // indirect - go.opentelemetry.io/collector/model v0.48.0 // indirect + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect 
go.opentelemetry.io/otel/trace v1.6.3 // indirect @@ -32,3 +31,5 @@ require ( gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/internal/aws/cwlogs/go.sum b/internal/aws/cwlogs/go.sum index 175849f2aeb3..17eeb7412dc3 100644 --- a/internal/aws/cwlogs/go.sum +++ b/internal/aws/cwlogs/go.sum @@ -74,8 +74,8 @@ github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= @@ -119,8 +119,6 @@ github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBO github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -134,10 +132,10 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= 
+go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= @@ -199,7 +197,7 @@ golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/internal/aws/ecsutil/go.mod b/internal/aws/ecsutil/go.mod index 65ba7c1e407d..015ac24b73e2 100644 --- a/internal/aws/ecsutil/go.mod +++ b/internal/aws/ecsutil/go.mod @@ -5,7 +5,7 @@ go 1.17 require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.48.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d go.uber.org/zap v1.21.0 ) @@ -19,7 +19,7 @@ require ( github.com/golang/protobuf v1.5.2 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/klauspost/compress v1.15.1 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/pretty v0.3.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect @@ -28,17 +28,20 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rogpeppe/go-internal v1.8.0 // indirect github.com/rs/cors v1.8.2 // indirect - github.com/spf13/cast v1.4.1 // indirect - go.opentelemetry.io/collector/model v0.48.0 // indirect + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect + golang.org/x/text v0.3.7 // indirect google.golang.org/grpc v1.45.0 // indirect google.golang.org/protobuf v1.28.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/common => ../../../internal/common + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/internal/aws/ecsutil/go.sum b/internal/aws/ecsutil/go.sum index 9567e59eb59e..d0ae8504b391 100644 
--- a/internal/aws/ecsutil/go.sum +++ b/internal/aws/ecsutil/go.sum @@ -115,8 +115,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= @@ -164,8 +164,6 @@ github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -178,10 +176,10 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0/go.mod h1:PFmBsWbldL1kiWZk9+0LBZz2brhByaGsvp6pRICMlPE= go.opentelemetry.io/otel v1.6.0/go.mod h1:bfJD2DZVw0LBxghOTlgnlI0CV3hLDu9XF/QKOUXMTQQ= @@ -228,6 
+226,8 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -252,11 +252,13 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/internal/aws/k8s/go.mod b/internal/aws/k8s/go.mod index 421518eaf91f..97c86cb79c7d 100644 --- a/internal/aws/k8s/go.mod +++ b/internal/aws/k8s/go.mod @@ -48,3 +48,5 @@ require ( sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect sigs.k8s.io/yaml v1.2.0 // indirect ) + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/internal/aws/metrics/go.mod b/internal/aws/metrics/go.mod index 5835188905d1..f3a9abb53540 100644 --- a/internal/aws/metrics/go.mod +++ b/internal/aws/metrics/go.mod @@ -14,3 +14,5 @@ require ( gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/internal/aws/proxy/go.mod b/internal/aws/proxy/go.mod index fa1af0d1c06f..c0f65a2b7dc3 100644 --- a/internal/aws/proxy/go.mod +++ b/internal/aws/proxy/go.mod @@ -6,7 +6,7 @@ require ( github.com/aws/aws-sdk-go v1.43.37 
github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.48.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d go.uber.org/zap v1.21.0 ) @@ -21,3 +21,5 @@ require ( ) replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/common => ../../../internal/common + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/internal/aws/proxy/go.sum b/internal/aws/proxy/go.sum index 27ca26608972..eb0c075d779f 100644 --- a/internal/aws/proxy/go.sum +++ b/internal/aws/proxy/go.sum @@ -30,8 +30,8 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= diff --git a/internal/aws/xray/go.mod b/internal/aws/xray/go.mod index 7d2f25f59b20..4dead7e48cc6 100644 --- a/internal/aws/xray/go.mod +++ b/internal/aws/xray/go.mod @@ -13,3 +13,5 @@ require ( gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/internal/aws/xray/testdata/sampleapp/go.mod b/internal/aws/xray/testdata/sampleapp/go.mod index 1c7a334c50cf..2097944f5ef6 100644 --- a/internal/aws/xray/testdata/sampleapp/go.mod +++ b/internal/aws/xray/testdata/sampleapp/go.mod @@ -26,3 +26,5 @@ require ( gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/internal/aws/xray/testdata/sampleserver/go.mod b/internal/aws/xray/testdata/sampleserver/go.mod index 6a2ec26c34b1..8b209a0e73cd 100644 --- a/internal/aws/xray/testdata/sampleserver/go.mod +++ b/internal/aws/xray/testdata/sampleserver/go.mod @@ -26,3 +26,5 @@ require ( golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect golang.org/x/text v0.3.7 // indirect ) + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/internal/common/go.mod b/internal/common/go.mod index f5d354b0ec70..45ede24b7b66 100644 --- a/internal/common/go.mod +++ b/internal/common/go.mod @@ -18,3 +18,5 @@ require ( gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) + +replace go.opentelemetry.io/collector/pdata => 
go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/internal/containertest/go.mod b/internal/containertest/go.mod index 3ac0b324de8b..c4b5f2e86889 100644 --- a/internal/containertest/go.mod +++ b/internal/containertest/go.mod @@ -37,3 +37,5 @@ require ( // see https://github.com/distribution/distribution/issues/3590 exclude github.com/docker/distribution v2.8.0+incompatible + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/internal/coreinternal/attraction/attraction.go b/internal/coreinternal/attraction/attraction.go index 83c3ff0c513c..b0941595f03f 100644 --- a/internal/coreinternal/attraction/attraction.go +++ b/internal/coreinternal/attraction/attraction.go @@ -21,7 +21,7 @@ import ( "strings" "go.opentelemetry.io/collector/client" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/processor/filterhelper" @@ -162,7 +162,7 @@ type attributeAction struct { // The reason is attributes processor will most likely be commonly used // and could impact performance. Action Action - AttributeValue *pdata.Value + AttributeValue *pcommon.Value } // AttrProc is an attribute processor. @@ -273,7 +273,7 @@ func NewAttrProc(settings *Settings) (*AttrProc, error) { } // Process applies the AttrProc to an attribute map. -func (ap *AttrProc) Process(ctx context.Context, logger *zap.Logger, attrs pdata.Map) { +func (ap *AttrProc) Process(ctx context.Context, logger *zap.Logger, attrs pcommon.Map) { for _, action := range ap.actions { // TODO https://go.opentelemetry.io/collector/issues/296 // Do benchmark testing between having action be of type string vs integer. @@ -310,18 +310,18 @@ func (ap *AttrProc) Process(ctx context.Context, logger *zap.Logger, attrs pdata } } -func getAttributeValueFromContext(ctx context.Context, key string) (pdata.Value, bool) { +func getAttributeValueFromContext(ctx context.Context, key string) (pcommon.Value, bool) { ci := client.FromContext(ctx) vals := ci.Metadata.Get(key) if len(vals) == 0 { - return pdata.Value{}, false + return pcommon.Value{}, false } - return pdata.NewValueString(strings.Join(vals, ";")), true + return pcommon.NewValueString(strings.Join(vals, ";")), true } -func getSourceAttributeValue(ctx context.Context, action attributeAction, attrs pdata.Map) (pdata.Value, bool) { +func getSourceAttributeValue(ctx context.Context, action attributeAction, attrs pcommon.Map) (pcommon.Value, bool) { // Set the key with a value from the configuration. 
if action.AttributeValue != nil { return *action.AttributeValue, true @@ -334,23 +334,23 @@ func getSourceAttributeValue(ctx context.Context, action attributeAction, attrs return attrs.Get(action.FromAttribute) } -func hashAttribute(action attributeAction, attrs pdata.Map) { +func hashAttribute(action attributeAction, attrs pcommon.Map) { if value, exists := attrs.Get(action.Key); exists { sha1Hasher(value) } } -func convertAttribute(logger *zap.Logger, action attributeAction, attrs pdata.Map) { +func convertAttribute(logger *zap.Logger, action attributeAction, attrs pcommon.Map) { if value, exists := attrs.Get(action.Key); exists { convertValue(logger, action.Key, action.ConvertedType, value) } } -func extractAttributes(action attributeAction, attrs pdata.Map) { +func extractAttributes(action attributeAction, attrs pcommon.Map) { value, found := attrs.Get(action.Key) // Extracting values only functions on strings. - if !found || value.Type() != pdata.ValueTypeString { + if !found || value.Type() != pcommon.ValueTypeString { return } diff --git a/internal/coreinternal/attraction/attraction_test.go b/internal/coreinternal/attraction/attraction_test.go index fefa93c62cd0..d5d6f805a02b 100644 --- a/internal/coreinternal/attraction/attraction_test.go +++ b/internal/coreinternal/attraction/attraction_test.go @@ -27,7 +27,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/client" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" ) // Common structure for all the Tests @@ -40,10 +40,10 @@ type testCase struct { // runIndividualTestCase is the common logic of passing trace data through a configured attributes processor. func runIndividualTestCase(t *testing.T, tt testCase, ap *AttrProc) { t.Run(tt.name, func(t *testing.T) { - attrMap := pdata.NewMapFromRaw(tt.inputAttributes) + attrMap := pcommon.NewMapFromRaw(tt.inputAttributes) ap.Process(context.TODO(), nil, attrMap) attrMap.Sort() - require.Equal(t, pdata.NewMapFromRaw(tt.expectedAttributes).Sort(), attrMap) + require.Equal(t, pcommon.NewMapFromRaw(tt.expectedAttributes).Sort(), attrMap) }) } @@ -860,7 +860,7 @@ func TestValidConfiguration(t *testing.T) { ap, err := NewAttrProc(cfg) require.NoError(t, err) - av := pdata.NewValueInt(123) + av := pcommon.NewValueInt(123) compiledRegex := regexp.MustCompile(`^\/api\/v1\/document\/(?P.*)\/update$`) assert.Equal(t, []attributeAction{ {Key: "one", Action: DELETE}, @@ -929,10 +929,10 @@ func TestFromContext(t *testing.T) { }) require.Nil(t, err) require.NotNil(t, ap) - attrMap := pdata.NewMap() + attrMap := pcommon.NewMap() ap.Process(tc.ctx, nil, attrMap) attrMap.Sort() - require.Equal(t, pdata.NewMapFromRaw(tc.expectedAttributes).Sort(), attrMap) + require.Equal(t, pcommon.NewMapFromRaw(tc.expectedAttributes).Sort(), attrMap) }) } } diff --git a/internal/coreinternal/attraction/hasher.go b/internal/coreinternal/attraction/hasher.go index c0cfa6f07938..52ebae135e7d 100644 --- a/internal/coreinternal/attraction/hasher.go +++ b/internal/coreinternal/attraction/hasher.go @@ -21,7 +21,7 @@ import ( "encoding/hex" "math" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" ) const ( @@ -38,21 +38,21 @@ var ( // hashed version of the attribute. In practice, this would mostly be used // for string attributes but we support all types for completeness/correctness // and eliminate any surprises. 
-func sha1Hasher(attr pdata.Value) { +func sha1Hasher(attr pcommon.Value) { var val []byte switch attr.Type() { - case pdata.ValueTypeString: + case pcommon.ValueTypeString: val = []byte(attr.StringVal()) - case pdata.ValueTypeBool: + case pcommon.ValueTypeBool: if attr.BoolVal() { val = byteTrue[:] } else { val = byteFalse[:] } - case pdata.ValueTypeInt: + case pcommon.ValueTypeInt: val = make([]byte, int64ByteSize) binary.LittleEndian.PutUint64(val, uint64(attr.IntVal())) - case pdata.ValueTypeDouble: + case pcommon.ValueTypeDouble: val = make([]byte, float64ByteSize) binary.LittleEndian.PutUint64(val, math.Float64bits(attr.DoubleVal())) } diff --git a/internal/coreinternal/attraction/type_converter.go b/internal/coreinternal/attraction/type_converter.go index 450ab93470ae..f7ff36e6e2d1 100644 --- a/internal/coreinternal/attraction/type_converter.go +++ b/internal/coreinternal/attraction/type_converter.go @@ -17,7 +17,7 @@ package attraction // import "github.com/open-telemetry/opentelemetry-collector- import ( "strconv" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" "go.uber.org/zap" ) @@ -27,26 +27,26 @@ const ( doubleConversionTarget = "double" ) -func convertValue(logger *zap.Logger, key string, to string, v pdata.Value) { +func convertValue(logger *zap.Logger, key string, to string, v pcommon.Value) { switch to { case stringConversionTarget: switch v.Type() { - case pdata.ValueTypeString: + case pcommon.ValueTypeString: default: v.SetStringVal(v.AsString()) } case intConversionTarget: switch v.Type() { - case pdata.ValueTypeInt: - case pdata.ValueTypeDouble: + case pcommon.ValueTypeInt: + case pcommon.ValueTypeDouble: v.SetIntVal(int64(v.DoubleVal())) - case pdata.ValueTypeBool: + case pcommon.ValueTypeBool: if v.BoolVal() { v.SetIntVal(1) } else { v.SetIntVal(0) } - case pdata.ValueTypeString: + case pcommon.ValueTypeString: s := v.StringVal() n, err := strconv.ParseInt(s, 10, 64) if err == nil { @@ -59,16 +59,16 @@ func convertValue(logger *zap.Logger, key string, to string, v pdata.Value) { } case doubleConversionTarget: switch v.Type() { - case pdata.ValueTypeInt: + case pcommon.ValueTypeInt: v.SetDoubleVal(float64(v.IntVal())) - case pdata.ValueTypeDouble: - case pdata.ValueTypeBool: + case pcommon.ValueTypeDouble: + case pcommon.ValueTypeBool: if v.BoolVal() { v.SetDoubleVal(1) } else { v.SetDoubleVal(0) } - case pdata.ValueTypeString: + case pcommon.ValueTypeString: s := v.StringVal() n, err := strconv.ParseFloat(s, 64) if err == nil { diff --git a/internal/coreinternal/go.mod b/internal/coreinternal/go.mod index 88439adb6fed..5131871e2cdd 100644 --- a/internal/coreinternal/go.mod +++ b/internal/coreinternal/go.mod @@ -8,8 +8,9 @@ require ( github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da github.com/spf13/cast v1.4.1 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/zap v1.21.0 google.golang.org/protobuf v1.28.0 ) @@ -20,7 +21,7 @@ require ( github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.2 // indirect github.com/google/go-cmp v0.5.7 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 
// indirect github.com/kr/pretty v0.3.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect @@ -34,3 +35,5 @@ require ( gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/internal/coreinternal/go.sum b/internal/coreinternal/go.sum index 079a4e63f6c0..2cc90b17a99b 100644 --- a/internal/coreinternal/go.sum +++ b/internal/coreinternal/go.sum @@ -81,8 +81,8 @@ github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= @@ -149,10 +149,12 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -207,7 +209,7 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= diff --git a/internal/coreinternal/goldendataset/metrics_gen.go b/internal/coreinternal/goldendataset/metrics_gen.go index 56985ed808c1..5b52883d770c 100644 --- a/internal/coreinternal/goldendataset/metrics_gen.go +++ b/internal/coreinternal/goldendataset/metrics_gen.go @@ -17,7 +17,8 @@ package goldendataset // import "github.com/open-telemetry/opentelemetry-collect import ( "fmt" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" ) // Simple utilities for generating metrics for testing @@ -26,9 +27,9 @@ import ( // metrics with the corresponding number/type of attributes and pass into MetricsFromCfg to generate metrics. type MetricsCfg struct { // The type of metric to generate - MetricDescriptorType pdata.MetricDataType + MetricDescriptorType pmetric.MetricDataType // MetricValueType is the type of the numeric value: int or double. - MetricValueType pdata.MetricValueType + MetricValueType pmetric.MetricValueType // If MetricDescriptorType is one of the Sum, this describes if the sum is monotonic or not. IsMonotonicSum bool // A prefix for every metric name @@ -57,8 +58,8 @@ type MetricsCfg struct { // (but boring) metrics, and can be used as a starting point for making alterations. func DefaultCfg() MetricsCfg { return MetricsCfg{ - MetricDescriptorType: pdata.MetricDataTypeGauge, - MetricValueType: pdata.MetricValueTypeInt, + MetricDescriptorType: pmetric.MetricDataTypeGauge, + MetricValueType: pmetric.MetricValueTypeInt, MetricNamePrefix: "", NumILMPerResource: 1, NumMetricsPerILM: 1, @@ -72,8 +73,8 @@ func DefaultCfg() MetricsCfg { } } -// MetricsFromCfg produces pdata.Metrics with the passed-in config. -func MetricsFromCfg(cfg MetricsCfg) pdata.Metrics { +// MetricsFromCfg produces pmetric.Metrics with the passed-in config. 
+func MetricsFromCfg(cfg MetricsCfg) pmetric.Metrics { mg := newMetricGenerator() return mg.genMetricFromCfg(cfg) } @@ -86,8 +87,8 @@ func newMetricGenerator() metricGenerator { return metricGenerator{} } -func (g *metricGenerator) genMetricFromCfg(cfg MetricsCfg) pdata.Metrics { - md := pdata.NewMetrics() +func (g *metricGenerator) genMetricFromCfg(cfg MetricsCfg) pmetric.Metrics { + md := pmetric.NewMetrics() rms := md.ResourceMetrics() rms.EnsureCapacity(cfg.NumResourceMetrics) for i := 0; i < cfg.NumResourceMetrics; i++ { @@ -96,7 +97,7 @@ func (g *metricGenerator) genMetricFromCfg(cfg MetricsCfg) pdata.Metrics { for j := 0; j < cfg.NumResourceAttrs; j++ { resource.Attributes().Insert( fmt.Sprintf("resource-attr-name-%d", j), - pdata.NewValueString(fmt.Sprintf("resource-attr-val-%d", j)), + pcommon.NewValueString(fmt.Sprintf("resource-attr-val-%d", j)), ) } g.populateIlm(cfg, rm) @@ -104,7 +105,7 @@ func (g *metricGenerator) genMetricFromCfg(cfg MetricsCfg) pdata.Metrics { return md } -func (g *metricGenerator) populateIlm(cfg MetricsCfg, rm pdata.ResourceMetrics) { +func (g *metricGenerator) populateIlm(cfg MetricsCfg, rm pmetric.ResourceMetrics) { ilms := rm.ScopeMetrics() ilms.EnsureCapacity(cfg.NumILMPerResource) for i := 0; i < cfg.NumILMPerResource; i++ { @@ -113,48 +114,48 @@ func (g *metricGenerator) populateIlm(cfg MetricsCfg, rm pdata.ResourceMetrics) } } -func (g *metricGenerator) populateMetrics(cfg MetricsCfg, ilm pdata.ScopeMetrics) { +func (g *metricGenerator) populateMetrics(cfg MetricsCfg, ilm pmetric.ScopeMetrics) { metrics := ilm.Metrics() metrics.EnsureCapacity(cfg.NumMetricsPerILM) for i := 0; i < cfg.NumMetricsPerILM; i++ { metric := metrics.AppendEmpty() g.populateMetricDesc(cfg, metric) switch cfg.MetricDescriptorType { - case pdata.MetricDataTypeGauge: - metric.SetDataType(pdata.MetricDataTypeGauge) + case pmetric.MetricDataTypeGauge: + metric.SetDataType(pmetric.MetricDataTypeGauge) populateNumberPoints(cfg, metric.Gauge().DataPoints()) - case pdata.MetricDataTypeSum: - metric.SetDataType(pdata.MetricDataTypeSum) + case pmetric.MetricDataTypeSum: + metric.SetDataType(pmetric.MetricDataTypeSum) sum := metric.Sum() sum.SetIsMonotonic(cfg.IsMonotonicSum) - sum.SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + sum.SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) populateNumberPoints(cfg, sum.DataPoints()) - case pdata.MetricDataTypeHistogram: - metric.SetDataType(pdata.MetricDataTypeHistogram) + case pmetric.MetricDataTypeHistogram: + metric.SetDataType(pmetric.MetricDataTypeHistogram) histo := metric.Histogram() - histo.SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + histo.SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) populateDoubleHistogram(cfg, histo) } } } -func (g *metricGenerator) populateMetricDesc(cfg MetricsCfg, metric pdata.Metric) { +func (g *metricGenerator) populateMetricDesc(cfg MetricsCfg, metric pmetric.Metric) { metric.SetName(fmt.Sprintf("%smetric_%d", cfg.MetricNamePrefix, g.metricID)) g.metricID++ metric.SetDescription("my-md-description") metric.SetUnit("my-md-units") } -func populateNumberPoints(cfg MetricsCfg, pts pdata.NumberDataPointSlice) { +func populateNumberPoints(cfg MetricsCfg, pts pmetric.NumberDataPointSlice) { pts.EnsureCapacity(cfg.NumPtsPerMetric) for i := 0; i < cfg.NumPtsPerMetric; i++ { pt := pts.AppendEmpty() - pt.SetStartTimestamp(pdata.Timestamp(cfg.StartTime)) + pt.SetStartTimestamp(pcommon.Timestamp(cfg.StartTime)) 
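	// pcommon.Timestamp (previously pdata.Timestamp) is still a uint64 count of
	// nanoseconds since the Unix epoch, so StartTime can be derived from a wall
	// clock before generating metrics; a minimal sketch, not part of this change:
	//
	//	cfg := DefaultCfg()
	//	cfg.StartTime = uint64(time.Now().UnixNano())
	//	md := MetricsFromCfg(cfg)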
pt.SetTimestamp(getTimestamp(cfg.StartTime, cfg.StepSize, i)) switch cfg.MetricValueType { - case pdata.MetricValueTypeInt: + case pmetric.MetricValueTypeInt: pt.SetIntVal(int64(cfg.PtVal + i)) - case pdata.MetricValueTypeDouble: + case pmetric.MetricValueTypeDouble: pt.SetDoubleVal(float64(cfg.PtVal + i)) default: panic("Should not happen") @@ -163,12 +164,12 @@ func populateNumberPoints(cfg MetricsCfg, pts pdata.NumberDataPointSlice) { } } -func populateDoubleHistogram(cfg MetricsCfg, dh pdata.Histogram) { +func populateDoubleHistogram(cfg MetricsCfg, dh pmetric.Histogram) { pts := dh.DataPoints() pts.EnsureCapacity(cfg.NumPtsPerMetric) for i := 0; i < cfg.NumPtsPerMetric; i++ { pt := pts.AppendEmpty() - pt.SetStartTimestamp(pdata.Timestamp(cfg.StartTime)) + pt.SetStartTimestamp(pcommon.Timestamp(cfg.StartTime)) ts := getTimestamp(cfg.StartTime, cfg.StepSize, i) pt.SetTimestamp(ts) populatePtAttributes(cfg, pt.Attributes()) @@ -181,12 +182,12 @@ func populateDoubleHistogram(cfg MetricsCfg, dh pdata.Histogram) { } } -func setDoubleHistogramBounds(hdp pdata.HistogramDataPoint, bounds ...float64) { +func setDoubleHistogramBounds(hdp pmetric.HistogramDataPoint, bounds ...float64) { hdp.SetBucketCounts(make([]uint64, len(bounds))) hdp.SetExplicitBounds(bounds) } -func addDoubleHistogramVal(hdp pdata.HistogramDataPoint, val float64) { +func addDoubleHistogramVal(hdp pmetric.HistogramDataPoint, val float64) { hdp.SetCount(hdp.Count() + 1) hdp.SetSum(hdp.Sum() + val) buckets := hdp.BucketCounts() @@ -200,7 +201,7 @@ func addDoubleHistogramVal(hdp pdata.HistogramDataPoint, val float64) { } } -func populatePtAttributes(cfg MetricsCfg, lm pdata.Map) { +func populatePtAttributes(cfg MetricsCfg, lm pcommon.Map) { for i := 0; i < cfg.NumPtLabels; i++ { k := fmt.Sprintf("pt-label-key-%d", i) v := fmt.Sprintf("pt-label-val-%d", i) @@ -208,6 +209,6 @@ func populatePtAttributes(cfg MetricsCfg, lm pdata.Map) { } } -func getTimestamp(startTime uint64, stepSize uint64, i int) pdata.Timestamp { - return pdata.Timestamp(startTime + (stepSize * uint64(i+1))) +func getTimestamp(startTime uint64, stepSize uint64, i int) pcommon.Timestamp { + return pcommon.Timestamp(startTime + (stepSize * uint64(i+1))) } diff --git a/internal/coreinternal/goldendataset/metrics_gen_test.go b/internal/coreinternal/goldendataset/metrics_gen_test.go index 1745f71f875a..4cc8f616f3ab 100644 --- a/internal/coreinternal/goldendataset/metrics_gen_test.go +++ b/internal/coreinternal/goldendataset/metrics_gen_test.go @@ -18,7 +18,7 @@ import ( "testing" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" ) func TestGenDefault(t *testing.T) { @@ -42,7 +42,7 @@ func TestGenDefault(t *testing.T) { require.Equal(t, "my-md-description", pdm.Description()) require.Equal(t, "my-md-units", pdm.Unit()) - require.Equal(t, pdata.MetricDataTypeGauge, pdm.DataType()) + require.Equal(t, pmetric.MetricDataTypeGauge, pdm.DataType()) pts := pdm.Gauge().DataPoints() require.Equal(t, 1, pts.Len()) pt := pts.At(0) @@ -57,7 +57,7 @@ func TestGenDefault(t *testing.T) { } func TestDoubleHistogramFunctions(t *testing.T) { - pt := pdata.NewHistogramDataPoint() + pt := pmetric.NewHistogramDataPoint() setDoubleHistogramBounds(pt, 1, 2, 3, 4, 5) require.Equal(t, 5, len(pt.ExplicitBounds())) require.Equal(t, 5, len(pt.BucketCounts())) @@ -80,7 +80,7 @@ func TestDoubleHistogramFunctions(t *testing.T) { func TestGenDoubleHistogram(t *testing.T) { cfg := DefaultCfg() - 
cfg.MetricDescriptorType = pdata.MetricDataTypeHistogram + cfg.MetricDescriptorType = pmetric.MetricDataTypeHistogram cfg.PtVal = 2 md := MetricsFromCfg(cfg) pts := getMetric(md).Histogram().DataPoints() @@ -92,7 +92,7 @@ func TestGenDoubleHistogram(t *testing.T) { func TestGenDoubleGauge(t *testing.T) { cfg := DefaultCfg() - cfg.MetricDescriptorType = pdata.MetricDataTypeGauge + cfg.MetricDescriptorType = pmetric.MetricDataTypeGauge md := MetricsFromCfg(cfg) metric := getMetric(md) pts := metric.Gauge().DataPoints() @@ -101,6 +101,6 @@ func TestGenDoubleGauge(t *testing.T) { require.EqualValues(t, float64(1), pt.IntVal()) } -func getMetric(md pdata.Metrics) pdata.Metric { +func getMetric(md pmetric.Metrics) pmetric.Metric { return md.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0) } diff --git a/internal/coreinternal/goldendataset/pict_metrics_gen.go b/internal/coreinternal/goldendataset/pict_metrics_gen.go index f0d914aca163..5e29f65b4f42 100644 --- a/internal/coreinternal/goldendataset/pict_metrics_gen.go +++ b/internal/coreinternal/goldendataset/pict_metrics_gen.go @@ -17,17 +17,17 @@ package goldendataset // import "github.com/open-telemetry/opentelemetry-collect import ( "fmt" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" ) // GenerateMetrics takes the filename of a PICT-generated file, walks through all of the rows in the PICT // file and for each row, generates a MetricData object, collecting them and returning them to the caller. -func GenerateMetrics(metricPairsFile string) ([]pdata.Metrics, error) { +func GenerateMetrics(metricPairsFile string) ([]pmetric.Metrics, error) { pictData, err := loadPictOutputFile(metricPairsFile) if err != nil { return nil, err } - var out []pdata.Metrics + var out []pmetric.Metrics for i, values := range pictData { if i == 0 { continue @@ -65,33 +65,33 @@ func pictToCfg(inputs PICTMetricInputs) MetricsCfg { switch inputs.MetricType { case MetricTypeIntGauge: - cfg.MetricDescriptorType = pdata.MetricDataTypeGauge - cfg.MetricValueType = pdata.MetricValueTypeInt + cfg.MetricDescriptorType = pmetric.MetricDataTypeGauge + cfg.MetricValueType = pmetric.MetricValueTypeInt case MetricTypeMonotonicIntSum: - cfg.MetricDescriptorType = pdata.MetricDataTypeSum - cfg.MetricValueType = pdata.MetricValueTypeInt + cfg.MetricDescriptorType = pmetric.MetricDataTypeSum + cfg.MetricValueType = pmetric.MetricValueTypeInt cfg.IsMonotonicSum = true case MetricTypeNonMonotonicIntSum: - cfg.MetricDescriptorType = pdata.MetricDataTypeSum - cfg.MetricValueType = pdata.MetricValueTypeInt + cfg.MetricDescriptorType = pmetric.MetricDataTypeSum + cfg.MetricValueType = pmetric.MetricValueTypeInt cfg.IsMonotonicSum = false case MetricTypeDoubleGauge: - cfg.MetricDescriptorType = pdata.MetricDataTypeGauge - cfg.MetricValueType = pdata.MetricValueTypeDouble + cfg.MetricDescriptorType = pmetric.MetricDataTypeGauge + cfg.MetricValueType = pmetric.MetricValueTypeDouble case MetricTypeMonotonicDoubleSum: - cfg.MetricDescriptorType = pdata.MetricDataTypeSum - cfg.MetricValueType = pdata.MetricValueTypeDouble + cfg.MetricDescriptorType = pmetric.MetricDataTypeSum + cfg.MetricValueType = pmetric.MetricValueTypeDouble cfg.IsMonotonicSum = true case MetricTypeNonMonotonicDoubleSum: - cfg.MetricDescriptorType = pdata.MetricDataTypeSum - cfg.MetricValueType = pdata.MetricValueTypeDouble + cfg.MetricDescriptorType = pmetric.MetricDataTypeSum + cfg.MetricValueType = pmetric.MetricValueTypeDouble cfg.IsMonotonicSum = false case 
MetricTypeDoubleExemplarsHistogram: - cfg.MetricDescriptorType = pdata.MetricDataTypeHistogram - cfg.MetricValueType = pdata.MetricValueTypeNone + cfg.MetricDescriptorType = pmetric.MetricDataTypeHistogram + cfg.MetricValueType = pmetric.MetricValueTypeNone case MetricTypeIntExemplarsHistogram: - cfg.MetricDescriptorType = pdata.MetricDataTypeHistogram - cfg.MetricValueType = pdata.MetricValueTypeNone + cfg.MetricDescriptorType = pmetric.MetricDataTypeHistogram + cfg.MetricValueType = pmetric.MetricValueTypeNone default: panic("Should not happen, unsupported type " + string(inputs.MetricType)) } diff --git a/internal/coreinternal/goldendataset/pict_metrics_gen_test.go b/internal/coreinternal/goldendataset/pict_metrics_gen_test.go index dcd3c24d328c..f7d9da4f23fe 100644 --- a/internal/coreinternal/goldendataset/pict_metrics_gen_test.go +++ b/internal/coreinternal/goldendataset/pict_metrics_gen_test.go @@ -19,7 +19,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" ) func TestGenerateMetricDatas(t *testing.T) { @@ -45,8 +45,8 @@ func TestPICTtoCfg(t *testing.T) { cfg: MetricsCfg{ NumResourceAttrs: 0, NumPtsPerMetric: 1, - MetricDescriptorType: pdata.MetricDataTypeGauge, - MetricValueType: pdata.MetricValueTypeInt, + MetricDescriptorType: pmetric.MetricDataTypeGauge, + MetricValueType: pmetric.MetricValueTypeInt, NumPtLabels: 0, }, }, @@ -61,8 +61,8 @@ func TestPICTtoCfg(t *testing.T) { cfg: MetricsCfg{ NumResourceAttrs: 1, NumPtsPerMetric: 1, - MetricDescriptorType: pdata.MetricDataTypeGauge, - MetricValueType: pdata.MetricValueTypeDouble, + MetricDescriptorType: pmetric.MetricDataTypeGauge, + MetricValueType: pmetric.MetricValueTypeDouble, NumPtLabels: 1, }, }, @@ -77,7 +77,7 @@ func TestPICTtoCfg(t *testing.T) { cfg: MetricsCfg{ NumResourceAttrs: 2, NumPtsPerMetric: 16, - MetricDescriptorType: pdata.MetricDataTypeHistogram, + MetricDescriptorType: pmetric.MetricDataTypeHistogram, NumPtLabels: 16, }, }, diff --git a/internal/coreinternal/goldendataset/resource_generator.go b/internal/coreinternal/goldendataset/resource_generator.go index f7af9a6fc08a..6bc9f16e8125 100644 --- a/internal/coreinternal/goldendataset/resource_generator.go +++ b/internal/coreinternal/goldendataset/resource_generator.go @@ -15,14 +15,14 @@ package goldendataset // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/goldendataset" import ( - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" ) // GenerateResource generates a PData Resource object with representative attributes for the // underlying resource type specified by the rscID input parameter. 
-func GenerateResource(rscID PICTInputResource) pdata.Resource { - resource := pdata.NewResource() +func GenerateResource(rscID PICTInputResource) pcommon.Resource { + resource := pcommon.NewResource() switch rscID { case ResourceEmpty: break @@ -42,11 +42,11 @@ func GenerateResource(rscID PICTInputResource) pdata.Resource { return resource } -func appendOnpremVMAttributes(attrMap pdata.Map) { +func appendOnpremVMAttributes(attrMap pcommon.Map) { attrMap.UpsertString(conventions.AttributeServiceName, "customers") attrMap.UpsertString(conventions.AttributeServiceNamespace, "production") attrMap.UpsertString(conventions.AttributeServiceVersion, "semver:0.7.3") - subMap := pdata.NewValueMap() + subMap := pcommon.NewValueMap() subMap.MapVal().InsertString("public", "tc-prod9.internal.example.com") subMap.MapVal().InsertString("internal", "172.18.36.18") attrMap.Upsert(conventions.AttributeHostName, subMap) @@ -56,7 +56,7 @@ func appendOnpremVMAttributes(attrMap pdata.Map) { attrMap.UpsertString(conventions.AttributeTelemetrySDKVersion, "0.3.0") } -func appendCloudVMAttributes(attrMap pdata.Map) { +func appendCloudVMAttributes(attrMap pcommon.Map) { attrMap.UpsertString(conventions.AttributeServiceName, "shoppingcart") attrMap.UpsertString(conventions.AttributeServiceName, "customers") attrMap.UpsertString(conventions.AttributeServiceNamespace, "production") @@ -73,7 +73,7 @@ func appendCloudVMAttributes(attrMap pdata.Map) { attrMap.UpsertString(conventions.AttributeCloudRegion, "South Central US") } -func appendOnpremK8sAttributes(attrMap pdata.Map) { +func appendOnpremK8sAttributes(attrMap pcommon.Map) { attrMap.UpsertString(conventions.AttributeContainerName, "cert-manager") attrMap.UpsertString(conventions.AttributeContainerImageName, "quay.io/jetstack/cert-manager-controller") attrMap.UpsertString(conventions.AttributeContainerImageTag, "v0.14.2") @@ -84,7 +84,7 @@ func appendOnpremK8sAttributes(attrMap pdata.Map) { attrMap.UpsertString(conventions.AttributeHostName, "docker-desktop") } -func appendCloudK8sAttributes(attrMap pdata.Map) { +func appendCloudK8sAttributes(attrMap pcommon.Map) { attrMap.UpsertString(conventions.AttributeContainerName, "otel-collector") attrMap.UpsertString(conventions.AttributeContainerImageName, "otel/opentelemetry-collector-contrib") attrMap.UpsertString(conventions.AttributeContainerImageTag, "0.4.0") @@ -106,7 +106,7 @@ func appendCloudK8sAttributes(attrMap pdata.Map) { attrMap.UpsertString(conventions.AttributeCloudAvailabilityZone, "us-east-1c") } -func appendFassAttributes(attrMap pdata.Map) { +func appendFassAttributes(attrMap pcommon.Map) { attrMap.UpsertString(conventions.AttributeFaaSID, "https://us-central1-dist-system-demo.cloudfunctions.net/env-vars-print") attrMap.UpsertString(conventions.AttributeFaaSName, "env-vars-print") attrMap.UpsertString(conventions.AttributeFaaSVersion, "semver:1.0.0") @@ -116,9 +116,9 @@ func appendFassAttributes(attrMap pdata.Map) { attrMap.UpsertString(conventions.AttributeCloudAvailabilityZone, "us-central1-a") } -func appendExecAttributes(attrMap pdata.Map) { +func appendExecAttributes(attrMap pcommon.Map) { attrMap.UpsertString(conventions.AttributeProcessExecutableName, "otelcol") - parts := pdata.NewValueSlice() + parts := pcommon.NewValueSlice() parts.SliceVal().AppendEmpty().SetStringVal("otelcol") parts.SliceVal().AppendEmpty().SetStringVal("--config=/etc/otel-collector-config.yaml") attrMap.Upsert(conventions.AttributeProcessCommandLine, parts) diff --git 
a/internal/coreinternal/goldendataset/span_generator.go b/internal/coreinternal/goldendataset/span_generator.go index 3ed343714f05..465167ee6031 100644 --- a/internal/coreinternal/goldendataset/span_generator.go +++ b/internal/coreinternal/goldendataset/span_generator.go @@ -19,14 +19,15 @@ import ( "io" "time" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" ) -var statusCodeMap = map[PICTInputStatus]pdata.StatusCode{ - SpanStatusUnset: pdata.StatusCodeUnset, - SpanStatusOk: pdata.StatusCodeOk, - SpanStatusError: pdata.StatusCodeError, +var statusCodeMap = map[PICTInputStatus]ptrace.StatusCode{ + SpanStatusUnset: ptrace.StatusCodeUnset, + SpanStatusOk: ptrace.StatusCodeOk, + SpanStatusError: ptrace.StatusCodeError, } var statusMsgMap = map[PICTInputStatus]string{ @@ -35,12 +36,12 @@ var statusMsgMap = map[PICTInputStatus]string{ SpanStatusError: "Error", } -// appendSpans appends to the pdata.SpanSlice objects the number of spans specified by the count input +// appendSpans appends to the ptrace.SpanSlice objects the number of spans specified by the count input // parameter. The random parameter injects the random number generator to use in generating IDs and other random values. // Using a random number generator with the same seed value enables reproducible tests. // // If err is not nil, the spans slice will have nil values. -func appendSpans(count int, pictFile string, random io.Reader, spanList pdata.SpanSlice) error { +func appendSpans(count int, pictFile string, random io.Reader, spanList ptrace.SpanSlice) error { pairsData, err := loadPictOutputFile(pictFile) if err != nil { return err @@ -49,8 +50,8 @@ func appendSpans(count int, pictFile string, random io.Reader, spanList pdata.Sp index := 1 var inputs []string var spanInputs *PICTSpanInputs - var traceID pdata.TraceID - var parentID pdata.SpanID + var traceID pcommon.TraceID + var parentID pcommon.SpanID for i := 0; i < count; i++ { if index >= pairsTotal { index = 1 @@ -68,7 +69,7 @@ func appendSpans(count int, pictFile string, random io.Reader, spanList pdata.Sp switch spanInputs.Parent { case SpanParentRoot: traceID = generateTraceID(random) - parentID = pdata.NewSpanID([8]byte{}) + parentID = pcommon.NewSpanID([8]byte{}) case SpanParentChild: // use existing if available if traceID.IsEmpty() { @@ -90,7 +91,7 @@ func generateSpanName(spanInputs *PICTSpanInputs) string { spanInputs.Attributes, spanInputs.Events, spanInputs.Links, spanInputs.Status) } -// fillSpan generates a single pdata.Span based on the input values provided. They are: +// fillSpan generates a single ptrace.Span based on the input values provided. They are: // traceID - the trace ID to use, should not be nil // parentID - the parent span ID or nil if it is a root span // spanName - the span name, should not be blank @@ -98,7 +99,7 @@ func generateSpanName(spanInputs *PICTSpanInputs) string { // random - the random number generator to use in generating ID values // // The generated span is returned. 
-func fillSpan(traceID pdata.TraceID, parentID pdata.SpanID, spanName string, spanInputs *PICTSpanInputs, random io.Reader, span pdata.Span) { +func fillSpan(traceID pcommon.TraceID, parentID pcommon.SpanID, spanName string, spanInputs *PICTSpanInputs, random io.Reader, span ptrace.Span) { endTime := time.Now().Add(-50 * time.Microsecond) span.SetTraceID(traceID) span.SetSpanID(generateSpanID(random)) @@ -106,8 +107,8 @@ func fillSpan(traceID pdata.TraceID, parentID pdata.SpanID, spanName string, spa span.SetParentSpanID(parentID) span.SetName(spanName) span.SetKind(lookupSpanKind(spanInputs.Kind)) - span.SetStartTimestamp(pdata.Timestamp(endTime.Add(-215 * time.Millisecond).UnixNano())) - span.SetEndTimestamp(pdata.Timestamp(endTime.UnixNano())) + span.SetStartTimestamp(pcommon.Timestamp(endTime.Add(-215 * time.Millisecond).UnixNano())) + span.SetEndTimestamp(pcommon.Timestamp(endTime.UnixNano())) appendSpanAttributes(spanInputs.Attributes, spanInputs.Status, span.Attributes()) span.SetDroppedAttributesCount(0) appendSpanEvents(spanInputs.Events, span.Events()) @@ -117,7 +118,7 @@ func fillSpan(traceID pdata.TraceID, parentID pdata.SpanID, spanName string, spa fillStatus(spanInputs.Status, span.Status()) } -func generateTraceState(tracestate PICTInputTracestate) pdata.TraceState { +func generateTraceState(tracestate PICTInputTracestate) ptrace.TraceState { switch tracestate { case TraceStateOne: return "lasterror=f39cd56cc44274fd5abd07ef1164246d10ce2955" @@ -130,26 +131,26 @@ func generateTraceState(tracestate PICTInputTracestate) pdata.TraceState { } } -func lookupSpanKind(kind PICTInputKind) pdata.SpanKind { +func lookupSpanKind(kind PICTInputKind) ptrace.SpanKind { switch kind { case SpanKindClient: - return pdata.SpanKindClient + return ptrace.SpanKindClient case SpanKindServer: - return pdata.SpanKindServer + return ptrace.SpanKindServer case SpanKindProducer: - return pdata.SpanKindProducer + return ptrace.SpanKindProducer case SpanKindConsumer: - return pdata.SpanKindConsumer + return ptrace.SpanKindConsumer case SpanKindInternal: - return pdata.SpanKindInternal + return ptrace.SpanKindInternal case SpanKindUnspecified: fallthrough default: - return pdata.SpanKindUnspecified + return ptrace.SpanKindUnspecified } } -func appendSpanAttributes(spanTypeID PICTInputAttributes, statusStr PICTInputStatus, attrMap pdata.Map) { +func appendSpanAttributes(spanTypeID PICTInputAttributes, statusStr PICTInputStatus, attrMap pcommon.Map) { includeStatus := statusStr != SpanStatusUnset switch spanTypeID { case SpanAttrEmpty: @@ -189,7 +190,7 @@ func appendSpanAttributes(spanTypeID PICTInputAttributes, statusStr PICTInputSta } } -func fillStatus(statusStr PICTInputStatus, spanStatus pdata.SpanStatus) { +func fillStatus(statusStr PICTInputStatus, spanStatus ptrace.SpanStatus) { if statusStr == SpanStatusUnset { return } @@ -197,7 +198,7 @@ func fillStatus(statusStr PICTInputStatus, spanStatus pdata.SpanStatus) { spanStatus.SetMessage(statusMsgMap[statusStr]) } -func appendDatabaseSQLAttributes(attrMap pdata.Map) { +func appendDatabaseSQLAttributes(attrMap pcommon.Map) { attrMap.UpsertString(conventions.AttributeDBSystem, "mysql") attrMap.UpsertString(conventions.AttributeDBConnectionString, "Server=shopdb.example.com;Database=ShopDb;Uid=billing_user;TableCache=true;UseCompression=True;MinimumPoolSize=10;MaximumPoolSize=50;") attrMap.UpsertString(conventions.AttributeDBUser, "billing_user") @@ -212,7 +213,7 @@ func appendDatabaseSQLAttributes(attrMap pdata.Map) { 
attrMap.UpsertString(conventions.AttributeEnduserID, "unittest") } -func appendDatabaseNoSQLAttributes(attrMap pdata.Map) { +func appendDatabaseNoSQLAttributes(attrMap pcommon.Map) { attrMap.UpsertString(conventions.AttributeDBSystem, "mongodb") attrMap.UpsertString(conventions.AttributeDBUser, "the_user") attrMap.UpsertString(conventions.AttributeNetPeerName, "mongodb0.example.com") @@ -225,7 +226,7 @@ func appendDatabaseNoSQLAttributes(attrMap pdata.Map) { attrMap.UpsertString(conventions.AttributeEnduserID, "unittest") } -func appendFaaSDatasourceAttributes(attrMap pdata.Map) { +func appendFaaSDatasourceAttributes(attrMap pcommon.Map) { attrMap.UpsertString(conventions.AttributeFaaSTrigger, conventions.AttributeFaaSTriggerDatasource) attrMap.UpsertString(conventions.AttributeFaaSExecution, "DB85AF51-5E13-473D-8454-1E2D59415EAB") attrMap.UpsertString(conventions.AttributeFaaSDocumentCollection, "faa-flight-delay-information-incoming") @@ -235,7 +236,7 @@ func appendFaaSDatasourceAttributes(attrMap pdata.Map) { attrMap.UpsertString(conventions.AttributeEnduserID, "unittest") } -func appendFaaSHTTPAttributes(includeStatus bool, attrMap pdata.Map) { +func appendFaaSHTTPAttributes(includeStatus bool, attrMap pcommon.Map) { attrMap.UpsertString(conventions.AttributeFaaSTrigger, conventions.AttributeFaaSTriggerHTTP) attrMap.UpsertString(conventions.AttributeHTTPMethod, "POST") attrMap.UpsertString(conventions.AttributeHTTPScheme, "https") @@ -250,7 +251,7 @@ func appendFaaSHTTPAttributes(includeStatus bool, attrMap pdata.Map) { attrMap.UpsertString(conventions.AttributeEnduserID, "unittest") } -func appendFaaSPubSubAttributes(attrMap pdata.Map) { +func appendFaaSPubSubAttributes(attrMap pcommon.Map) { attrMap.UpsertString(conventions.AttributeFaaSTrigger, conventions.AttributeFaaSTriggerPubsub) attrMap.UpsertString(conventions.AttributeMessagingSystem, "sqs") attrMap.UpsertString(conventions.AttributeMessagingDestination, "video-views-au") @@ -258,7 +259,7 @@ func appendFaaSPubSubAttributes(attrMap pdata.Map) { attrMap.UpsertString(conventions.AttributeEnduserID, "unittest") } -func appendFaaSTimerAttributes(attrMap pdata.Map) { +func appendFaaSTimerAttributes(attrMap pcommon.Map) { attrMap.UpsertString(conventions.AttributeFaaSTrigger, conventions.AttributeFaaSTriggerTimer) attrMap.UpsertString(conventions.AttributeFaaSExecution, "73103A4C-E22F-4493-BDE8-EAE5CAB37B50") attrMap.UpsertString(conventions.AttributeFaaSTime, "2020-05-09T20:00:08Z") @@ -266,7 +267,7 @@ func appendFaaSTimerAttributes(attrMap pdata.Map) { attrMap.UpsertString(conventions.AttributeEnduserID, "unittest") } -func appendFaaSOtherAttributes(attrMap pdata.Map) { +func appendFaaSOtherAttributes(attrMap pcommon.Map) { attrMap.UpsertString(conventions.AttributeFaaSTrigger, conventions.AttributeFaaSTriggerOther) attrMap.UpsertInt("processed.count", 256) attrMap.UpsertDouble("processed.data", 14.46) @@ -274,7 +275,7 @@ func appendFaaSOtherAttributes(attrMap pdata.Map) { attrMap.UpsertString(conventions.AttributeEnduserID, "unittest") } -func appendHTTPClientAttributes(includeStatus bool, attrMap pdata.Map) { +func appendHTTPClientAttributes(includeStatus bool, attrMap pcommon.Map) { attrMap.UpsertString(conventions.AttributeHTTPMethod, "GET") attrMap.UpsertString(conventions.AttributeHTTPURL, "https://opentelemetry.io/registry/") if includeStatus { @@ -284,7 +285,7 @@ func appendHTTPClientAttributes(includeStatus bool, attrMap pdata.Map) { attrMap.UpsertString(conventions.AttributeEnduserID, "unittest") } -func 
appendHTTPServerAttributes(includeStatus bool, attrMap pdata.Map) { +func appendHTTPServerAttributes(includeStatus bool, attrMap pcommon.Map) { attrMap.UpsertString(conventions.AttributeHTTPMethod, "POST") attrMap.UpsertString(conventions.AttributeHTTPScheme, "https") attrMap.UpsertString(conventions.AttributeHTTPServerName, "api22.opentelemetry.io") @@ -301,7 +302,7 @@ func appendHTTPServerAttributes(includeStatus bool, attrMap pdata.Map) { attrMap.UpsertString(conventions.AttributeEnduserID, "unittest") } -func appendMessagingProducerAttributes(attrMap pdata.Map) { +func appendMessagingProducerAttributes(attrMap pcommon.Map) { attrMap.UpsertString(conventions.AttributeMessagingSystem, "nats") attrMap.UpsertString(conventions.AttributeMessagingDestination, "time.us.east.atlanta") attrMap.UpsertString(conventions.AttributeMessagingDestinationKind, "topic") @@ -311,7 +312,7 @@ func appendMessagingProducerAttributes(attrMap pdata.Map) { attrMap.UpsertString(conventions.AttributeEnduserID, "unittest") } -func appendMessagingConsumerAttributes(attrMap pdata.Map) { +func appendMessagingConsumerAttributes(attrMap pcommon.Map) { attrMap.UpsertString(conventions.AttributeMessagingSystem, "kafka") attrMap.UpsertString(conventions.AttributeMessagingDestination, "infrastructure-events-zone1") attrMap.UpsertString(conventions.AttributeMessagingOperation, "receive") @@ -319,25 +320,25 @@ func appendMessagingConsumerAttributes(attrMap pdata.Map) { attrMap.UpsertString(conventions.AttributeEnduserID, "unittest") } -func appendGRPCClientAttributes(attrMap pdata.Map) { +func appendGRPCClientAttributes(attrMap pcommon.Map) { attrMap.UpsertString(conventions.AttributeRPCService, "PullRequestsService") attrMap.UpsertString(conventions.AttributeNetPeerIP, "2600:1700:1f00:11c0:4de0:c223:a800:4e87") attrMap.UpsertInt(conventions.AttributeNetHostPort, 8443) attrMap.UpsertString(conventions.AttributeEnduserID, "unittest") } -func appendGRPCServerAttributes(attrMap pdata.Map) { +func appendGRPCServerAttributes(attrMap pcommon.Map) { attrMap.UpsertString(conventions.AttributeRPCService, "PullRequestsService") attrMap.UpsertString(conventions.AttributeNetPeerIP, "192.168.1.70") attrMap.UpsertString(conventions.AttributeEnduserID, "unittest") } -func appendInternalAttributes(attrMap pdata.Map) { +func appendInternalAttributes(attrMap pcommon.Map) { attrMap.UpsertString("parameters", "account=7310,amount=1817.10") attrMap.UpsertString(conventions.AttributeEnduserID, "unittest") } -func appendMaxCountAttributes(includeStatus bool, attrMap pdata.Map) { +func appendMaxCountAttributes(includeStatus bool, attrMap pcommon.Map) { attrMap.UpsertString(conventions.AttributeHTTPMethod, "POST") attrMap.UpsertString(conventions.AttributeHTTPScheme, "https") attrMap.UpsertString(conventions.AttributeHTTPHost, "api.opentelemetry.io") @@ -361,12 +362,12 @@ func appendMaxCountAttributes(includeStatus bool, attrMap pdata.Map) { attrMap.UpsertBool("ai-sampler.absolute", false) attrMap.UpsertInt("ai-sampler.maxhops", 6) attrMap.UpsertString("application.create.location", "https://api.opentelemetry.io/blog/posts/806673B9-4F4D-4284-9635-3A3E3E3805BE") - stages := pdata.NewValueSlice() + stages := pcommon.NewValueSlice() stages.SliceVal().AppendEmpty().SetStringVal("Launch") stages.SliceVal().AppendEmpty().SetStringVal("Injestion") stages.SliceVal().AppendEmpty().SetStringVal("Validation") attrMap.Upsert("application.stages", stages) - subMap := pdata.NewValueMap() + subMap := pcommon.NewValueMap() subMap.MapVal().InsertBool("UIx", false) 
subMap.MapVal().InsertBool("UI4", true) subMap.MapVal().InsertBool("flow-alt3", false) @@ -383,14 +384,14 @@ func appendMaxCountAttributes(includeStatus bool, attrMap pdata.Map) { attrMap.UpsertString(conventions.AttributeEnduserScope, "email profile administrator") } -func appendSpanEvents(eventCnt PICTInputSpanChild, spanEvents pdata.SpanEventSlice) { +func appendSpanEvents(eventCnt PICTInputSpanChild, spanEvents ptrace.SpanEventSlice) { listSize := calculateListSize(eventCnt) for i := 0; i < listSize; i++ { appendSpanEvent(i, spanEvents) } } -func appendSpanLinks(linkCnt PICTInputSpanChild, random io.Reader, spanLinks pdata.SpanLinkSlice) { +func appendSpanLinks(linkCnt PICTInputSpanChild, random io.Reader, spanLinks ptrace.SpanLinkSlice) { listSize := calculateListSize(linkCnt) for i := 0; i < listSize; i++ { appendSpanLink(random, i, spanLinks) @@ -412,10 +413,10 @@ func calculateListSize(listCnt PICTInputSpanChild) int { } } -func appendSpanEvent(index int, spanEvents pdata.SpanEventSlice) { +func appendSpanEvent(index int, spanEvents ptrace.SpanEventSlice) { spanEvent := spanEvents.AppendEmpty() t := time.Now().Add(-75 * time.Microsecond) - spanEvent.SetTimestamp(pdata.Timestamp(t.UnixNano())) + spanEvent.SetTimestamp(pcommon.Timestamp(t.UnixNano())) switch index % 4 { case 0, 3: spanEvent.SetName("message") @@ -441,7 +442,7 @@ func appendSpanEvent(index int, spanEvents pdata.SpanEventSlice) { spanEvent.SetDroppedAttributesCount(0) } -func appendSpanLink(random io.Reader, index int, spanLinks pdata.SpanLinkSlice) { +func appendSpanLink(random io.Reader, index int, spanLinks ptrace.SpanLinkSlice) { spanLink := spanLinks.AppendEmpty() spanLink.SetTraceID(generateTraceID(random)) spanLink.SetSpanID(generateSpanID(random)) @@ -458,20 +459,20 @@ func appendSpanLink(random io.Reader, index int, spanLinks pdata.SpanLinkSlice) spanLink.SetDroppedAttributesCount(0) } -func generateTraceID(random io.Reader) pdata.TraceID { +func generateTraceID(random io.Reader) pcommon.TraceID { var r [16]byte _, err := random.Read(r[:]) if err != nil { panic(err) } - return pdata.NewTraceID(r) + return pcommon.NewTraceID(r) } -func generateSpanID(random io.Reader) pdata.SpanID { +func generateSpanID(random io.Reader) pcommon.SpanID { var r [8]byte _, err := random.Read(r[:]) if err != nil { panic(err) } - return pdata.NewSpanID(r) + return pcommon.NewSpanID(r) } diff --git a/internal/coreinternal/goldendataset/span_generator_test.go b/internal/coreinternal/goldendataset/span_generator_test.go index 7ba5bc2a16aa..201283bbed62 100644 --- a/internal/coreinternal/goldendataset/span_generator_test.go +++ b/internal/coreinternal/goldendataset/span_generator_test.go @@ -19,7 +19,8 @@ import ( "testing" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" ) func TestGenerateParentSpan(t *testing.T) { @@ -34,12 +35,12 @@ func TestGenerateParentSpan(t *testing.T) { Links: SpanChildCountOne, Status: SpanStatusOk, } - span := pdata.NewSpan() - fillSpan(traceID, pdata.NewSpanID([8]byte{}), "/gotest-parent", spanInputs, random, span) + span := ptrace.NewSpan() + fillSpan(traceID, pcommon.NewSpanID([8]byte{}), "/gotest-parent", spanInputs, random, span) assert.Equal(t, traceID, span.TraceID()) assert.True(t, span.ParentSpanID().IsEmpty()) assert.Equal(t, 11, span.Attributes().Len()) - assert.Equal(t, pdata.StatusCodeOk, span.Status().Code()) + assert.Equal(t, ptrace.StatusCodeOk, 
span.Status().Code()) } func TestGenerateChildSpan(t *testing.T) { @@ -55,30 +56,30 @@ func TestGenerateChildSpan(t *testing.T) { Links: SpanChildCountEmpty, Status: SpanStatusOk, } - span := pdata.NewSpan() + span := ptrace.NewSpan() fillSpan(traceID, parentID, "get_test_info", spanInputs, random, span) assert.Equal(t, traceID, span.TraceID()) assert.Equal(t, parentID, span.ParentSpanID()) assert.Equal(t, 12, span.Attributes().Len()) - assert.Equal(t, pdata.StatusCodeOk, span.Status().Code()) + assert.Equal(t, ptrace.StatusCodeOk, span.Status().Code()) } func TestGenerateSpans(t *testing.T) { random := rand.Reader count1 := 16 - spans := pdata.NewSpanSlice() + spans := ptrace.NewSpanSlice() err := appendSpans(count1, "testdata/generated_pict_pairs_spans.txt", random, spans) assert.NoError(t, err) assert.Equal(t, count1, spans.Len()) count2 := 256 - spans = pdata.NewSpanSlice() + spans = ptrace.NewSpanSlice() err = appendSpans(count2, "testdata/generated_pict_pairs_spans.txt", random, spans) assert.NoError(t, err) assert.Equal(t, count2, spans.Len()) count3 := 118 - spans = pdata.NewSpanSlice() + spans = ptrace.NewSpanSlice() err = appendSpans(count3, "testdata/generated_pict_pairs_spans.txt", random, spans) assert.NoError(t, err) assert.Equal(t, count3, spans.Len()) diff --git a/internal/coreinternal/goldendataset/traces_generator.go b/internal/coreinternal/goldendataset/traces_generator.go index c440509c48a2..808cfe239d2e 100644 --- a/internal/coreinternal/goldendataset/traces_generator.go +++ b/internal/coreinternal/goldendataset/traces_generator.go @@ -19,21 +19,22 @@ import ( "io" "math/rand" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" ) // GenerateTraces generates a slice of OTLP ResourceSpans objects based on the PICT-generated pairwise // parameters defined in the parameters file specified by the tracePairsFile parameter. The pairs to generate // spans for for defined in the file specified by the spanPairsFile parameter. // The slice of ResourceSpans are returned. If an err is returned, the slice elements will be nil. -func GenerateTraces(tracePairsFile string, spanPairsFile string) ([]pdata.Traces, error) { +func GenerateTraces(tracePairsFile string, spanPairsFile string) ([]ptrace.Traces, error) { random := io.Reader(rand.New(rand.NewSource(42))) pairsData, err := loadPictOutputFile(tracePairsFile) if err != nil { return nil, err } pairsTotal := len(pairsData) - 1 - traces := make([]pdata.Traces, pairsTotal) + traces := make([]ptrace.Traces, pairsTotal) for index, values := range pairsData { if index == 0 { continue @@ -43,7 +44,7 @@ func GenerateTraces(tracePairsFile string, spanPairsFile string) ([]pdata.Traces InstrumentationLibrary: PICTInputInstrumentationLibrary(values[TracesColumnInstrumentationLibrary]), Spans: PICTInputSpans(values[TracesColumnSpans]), } - traces[index-1] = pdata.NewTraces() + traces[index-1] = ptrace.NewTraces() spanErr := appendResourceSpan(tracingInputs, spanPairsFile, random, traces[index-1].ResourceSpans()) if spanErr != nil { return nil, err @@ -59,7 +60,7 @@ func GenerateTraces(tracePairsFile string, spanPairsFile string) ([]pdata.Traces // // The generated resource spans. If err is not nil, some or all of the resource spans fields will be nil. 
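A minimal sketch, not part of this diff, of the split ptrace/pcommon API that the golden-dataset generators above are migrated to: trace structures and span enums now come from ptrace, while IDs, timestamps, and attribute maps come from pcommon. The span name, ID bytes, and attribute values below are illustrative only.

package main

import (
	"fmt"
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/ptrace"
)

func main() {
	td := ptrace.NewTraces() // previously pdata.NewTraces()
	span := td.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty().Spans().AppendEmpty()
	// IDs, timestamps, and attribute maps now live in pcommon; span kinds live in ptrace.
	span.SetTraceID(pcommon.NewTraceID([16]byte{0x01}))
	span.SetSpanID(pcommon.NewSpanID([8]byte{0x02}))
	span.SetName("example-span")
	span.SetKind(ptrace.SpanKindClient)
	span.SetStartTimestamp(pcommon.NewTimestampFromTime(time.Now().Add(-215 * time.Millisecond)))
	span.SetEndTimestamp(pcommon.NewTimestampFromTime(time.Now()))
	span.Attributes().UpsertString("enduser.id", "unittest")
	fmt.Println(span.Name(), span.Attributes().Len())
}

The same pattern is what fillSpan and GenerateTraces above now rely on after the import rewrite.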
func appendResourceSpan(tracingInputs *PICTTracingInputs, spanPairsFile string, - random io.Reader, resourceSpansSlice pdata.ResourceSpansSlice) error { + random io.Reader, resourceSpansSlice ptrace.ResourceSpansSlice) error { resourceSpan := resourceSpansSlice.AppendEmpty() err := appendScopeSpans(tracingInputs, spanPairsFile, random, resourceSpan.ScopeSpans()) if err != nil { @@ -70,7 +71,7 @@ func appendResourceSpan(tracingInputs *PICTTracingInputs, spanPairsFile string, } func appendScopeSpans(tracingInputs *PICTTracingInputs, spanPairsFile string, - random io.Reader, scopeSpansSlice pdata.ScopeSpansSlice) error { + random io.Reader, scopeSpansSlice ptrace.ScopeSpansSlice) error { var count int switch tracingInputs.InstrumentationLibrary { case LibraryNone: @@ -89,7 +90,7 @@ func appendScopeSpans(tracingInputs *PICTTracingInputs, spanPairsFile string, return nil } -func fillScopeSpans(tracingInputs *PICTTracingInputs, index int, spanPairsFile string, random io.Reader, scopeSpans pdata.ScopeSpans) error { +func fillScopeSpans(tracingInputs *PICTTracingInputs, index int, spanPairsFile string, random io.Reader, scopeSpans ptrace.ScopeSpans) error { spanCaseCount, err := countTotalSpanCases(spanPairsFile) if err != nil { return err @@ -118,7 +119,7 @@ func countTotalSpanCases(spanPairsFile string) (int, error) { return count, err } -func fillInstrumentationLibrary(tracingInputs *PICTTracingInputs, index int, instrumentationLibrary pdata.InstrumentationScope) { +func fillInstrumentationLibrary(tracingInputs *PICTTracingInputs, index int, instrumentationLibrary pcommon.InstrumentationScope) { if tracingInputs.InstrumentationLibrary == LibraryNone { return } diff --git a/internal/coreinternal/idutils/big_endian_converter.go b/internal/coreinternal/idutils/big_endian_converter.go index 23006bf8c00c..4dbbd4463e47 100644 --- a/internal/coreinternal/idutils/big_endian_converter.go +++ b/internal/coreinternal/idutils/big_endian_converter.go @@ -17,32 +17,32 @@ package idutils // import "github.com/open-telemetry/opentelemetry-collector-con import ( "encoding/binary" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" ) -// UInt64ToTraceID converts the pair of uint64 representation of a TraceID to pdata.TraceID. -func UInt64ToTraceID(high, low uint64) pdata.TraceID { +// UInt64ToTraceID converts the pair of uint64 representation of a TraceID to pcommon.TraceID. +func UInt64ToTraceID(high, low uint64) pcommon.TraceID { traceID := [16]byte{} binary.BigEndian.PutUint64(traceID[:8], high) binary.BigEndian.PutUint64(traceID[8:], low) - return pdata.NewTraceID(traceID) + return pcommon.NewTraceID(traceID) } -// TraceIDToUInt64Pair converts the pdata.TraceID to a pair of uint64 representation. -func TraceIDToUInt64Pair(traceID pdata.TraceID) (uint64, uint64) { +// TraceIDToUInt64Pair converts the pcommon.TraceID to a pair of uint64 representation. +func TraceIDToUInt64Pair(traceID pcommon.TraceID) (uint64, uint64) { bytes := traceID.Bytes() return binary.BigEndian.Uint64(bytes[:8]), binary.BigEndian.Uint64(bytes[8:]) } -// UInt64ToSpanID converts the uint64 representation of a SpanID to pdata.SpanID. -func UInt64ToSpanID(id uint64) pdata.SpanID { +// UInt64ToSpanID converts the uint64 representation of a SpanID to pcommon.SpanID. 
+func UInt64ToSpanID(id uint64) pcommon.SpanID { spanID := [8]byte{} binary.BigEndian.PutUint64(spanID[:], id) - return pdata.NewSpanID(spanID) + return pcommon.NewSpanID(spanID) } -// SpanIDToUInt64 converts the pdata.SpanID to uint64 representation. -func SpanIDToUInt64(spanID pdata.SpanID) uint64 { +// SpanIDToUInt64 converts the pcommon.SpanID to uint64 representation. +func SpanIDToUInt64(spanID pcommon.SpanID) uint64 { bytes := spanID.Bytes() return binary.BigEndian.Uint64(bytes[:]) } diff --git a/internal/coreinternal/idutils/big_endian_converter_test.go b/internal/coreinternal/idutils/big_endian_converter_test.go index b44d544598ce..baedaecbb9a2 100644 --- a/internal/coreinternal/idutils/big_endian_converter_test.go +++ b/internal/coreinternal/idutils/big_endian_converter_test.go @@ -19,44 +19,44 @@ import ( "testing" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" ) func TestUInt64ToTraceIDConversion(t *testing.T) { assert.Equal(t, - pdata.NewTraceID([16]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}), + pcommon.NewTraceID([16]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}), UInt64ToTraceID(0, 0), "Failed 0 conversion:") assert.Equal(t, - pdata.NewTraceID([16]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01}), + pcommon.NewTraceID([16]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01}), UInt64ToTraceID(256*256+256+1, 256+1), "Failed simple conversion:") assert.Equal(t, - pdata.NewTraceID([16]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05}), + pcommon.NewTraceID([16]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05}), UInt64ToTraceID(0, 5), "Failed to convert 0 high:") assert.Equal(t, UInt64ToTraceID(5, 0), - pdata.NewTraceID([16]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}), + pcommon.NewTraceID([16]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}), UInt64ToTraceID(5, 0), "Failed to convert 0 low:") assert.Equal(t, - pdata.NewTraceID([16]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05}), + pcommon.NewTraceID([16]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05}), UInt64ToTraceID(math.MaxUint64, 5), "Failed to convert MaxUint64:") } func TestUInt64ToSpanIDConversion(t *testing.T) { assert.Equal(t, - pdata.NewSpanID([8]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}), + pcommon.NewSpanID([8]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}), UInt64ToSpanID(0), "Failed 0 conversion:") assert.Equal(t, - pdata.NewSpanID([8]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01}), + pcommon.NewSpanID([8]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01}), UInt64ToSpanID(256*256+256+1), "Failed simple conversion:") assert.Equal(t, - pdata.NewSpanID([8]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}), + pcommon.NewSpanID([8]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}), UInt64ToSpanID(math.MaxUint64), "Failed to convert MaxUint64:") } diff --git a/internal/coreinternal/processor/filterexpr/matcher.go b/internal/coreinternal/processor/filterexpr/matcher.go index 
568d311b706b..13776c27258f 100644 --- a/internal/coreinternal/processor/filterexpr/matcher.go +++ b/internal/coreinternal/processor/filterexpr/matcher.go @@ -17,7 +17,8 @@ package filterexpr // import "github.com/open-telemetry/opentelemetry-collector- import ( "github.com/antonmedv/expr" "github.com/antonmedv/expr/vm" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" ) type Matcher struct { @@ -27,7 +28,7 @@ type Matcher struct { type env struct { MetricName string - attributes pdata.Map + attributes pcommon.Map } func (e *env) HasLabel(key string) bool { @@ -48,21 +49,21 @@ func NewMatcher(expression string) (*Matcher, error) { return &Matcher{program: program, v: vm.VM{}}, nil } -func (m *Matcher) MatchMetric(metric pdata.Metric) (bool, error) { +func (m *Matcher) MatchMetric(metric pmetric.Metric) (bool, error) { metricName := metric.Name() switch metric.DataType() { - case pdata.MetricDataTypeGauge: + case pmetric.MetricDataTypeGauge: return m.matchGauge(metricName, metric.Gauge()) - case pdata.MetricDataTypeSum: + case pmetric.MetricDataTypeSum: return m.matchSum(metricName, metric.Sum()) - case pdata.MetricDataTypeHistogram: + case pmetric.MetricDataTypeHistogram: return m.matchDoubleHistogram(metricName, metric.Histogram()) default: return false, nil } } -func (m *Matcher) matchGauge(metricName string, gauge pdata.Gauge) (bool, error) { +func (m *Matcher) matchGauge(metricName string, gauge pmetric.Gauge) (bool, error) { pts := gauge.DataPoints() for i := 0; i < pts.Len(); i++ { matched, err := m.matchEnv(metricName, pts.At(i).Attributes()) @@ -76,7 +77,7 @@ func (m *Matcher) matchGauge(metricName string, gauge pdata.Gauge) (bool, error) return false, nil } -func (m *Matcher) matchSum(metricName string, sum pdata.Sum) (bool, error) { +func (m *Matcher) matchSum(metricName string, sum pmetric.Sum) (bool, error) { pts := sum.DataPoints() for i := 0; i < pts.Len(); i++ { matched, err := m.matchEnv(metricName, pts.At(i).Attributes()) @@ -90,7 +91,7 @@ func (m *Matcher) matchSum(metricName string, sum pdata.Sum) (bool, error) { return false, nil } -func (m *Matcher) matchDoubleHistogram(metricName string, histogram pdata.Histogram) (bool, error) { +func (m *Matcher) matchDoubleHistogram(metricName string, histogram pmetric.Histogram) (bool, error) { pts := histogram.DataPoints() for i := 0; i < pts.Len(); i++ { matched, err := m.matchEnv(metricName, pts.At(i).Attributes()) @@ -104,11 +105,11 @@ func (m *Matcher) matchDoubleHistogram(metricName string, histogram pdata.Histog return false, nil } -func (m *Matcher) matchEnv(metricName string, attributes pdata.Map) (bool, error) { +func (m *Matcher) matchEnv(metricName string, attributes pcommon.Map) (bool, error) { return m.match(createEnv(metricName, attributes)) } -func createEnv(metricName string, attributes pdata.Map) *env { +func createEnv(metricName string, attributes pcommon.Map) *env { return &env{ MetricName: metricName, attributes: attributes, diff --git a/internal/coreinternal/processor/filterexpr/matcher_test.go b/internal/coreinternal/processor/filterexpr/matcher_test.go index e5d27d6ec140..4829ab0017ff 100644 --- a/internal/coreinternal/processor/filterexpr/matcher_test.go +++ b/internal/coreinternal/processor/filterexpr/matcher_test.go @@ -19,7 +19,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" + 
"go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" ) func TestCompileExprError(t *testing.T) { @@ -37,7 +38,7 @@ func TestRunExprError(t *testing.T) { func TestUnknownDataType(t *testing.T) { matcher, err := NewMatcher(`MetricName == 'my.metric'`) require.NoError(t, err) - m := pdata.NewMetric() + m := pmetric.NewMetric() m.SetName("my.metric") m.SetDataType(-1) matched, err := matcher.MatchMetric(m) @@ -46,21 +47,21 @@ func TestUnknownDataType(t *testing.T) { } func TestEmptyGauge(t *testing.T) { - testEmptyValue(t, pdata.MetricDataTypeGauge) + testEmptyValue(t, pmetric.MetricDataTypeGauge) } func TestEmptySum(t *testing.T) { - testEmptyValue(t, pdata.MetricDataTypeSum) + testEmptyValue(t, pmetric.MetricDataTypeSum) } func TestEmptyHistogram(t *testing.T) { - testEmptyValue(t, pdata.MetricDataTypeHistogram) + testEmptyValue(t, pmetric.MetricDataTypeHistogram) } -func testEmptyValue(t *testing.T, dataType pdata.MetricDataType) { +func testEmptyValue(t *testing.T, dataType pmetric.MetricDataType) { matcher, err := NewMatcher(`MetricName == 'my.metric'`) require.NoError(t, err) - m := pdata.NewMetric() + m := pmetric.NewMetric() m.SetName("my.metric") m.SetDataType(dataType) matched, err := matcher.MatchMetric(m) @@ -71,9 +72,9 @@ func testEmptyValue(t *testing.T, dataType pdata.MetricDataType) { func TestGaugeEmptyDataPoint(t *testing.T) { matcher, err := NewMatcher(`MetricName == 'my.metric'`) require.NoError(t, err) - m := pdata.NewMetric() + m := pmetric.NewMetric() m.SetName("my.metric") - m.SetDataType(pdata.MetricDataTypeGauge) + m.SetDataType(pmetric.MetricDataTypeGauge) m.Gauge().DataPoints().AppendEmpty() matched, err := matcher.MatchMetric(m) assert.NoError(t, err) @@ -83,9 +84,9 @@ func TestGaugeEmptyDataPoint(t *testing.T) { func TestSumEmptyDataPoint(t *testing.T) { matcher, err := NewMatcher(`MetricName == 'my.metric'`) require.NoError(t, err) - m := pdata.NewMetric() + m := pmetric.NewMetric() m.SetName("my.metric") - m.SetDataType(pdata.MetricDataTypeSum) + m.SetDataType(pmetric.MetricDataTypeSum) m.Sum().DataPoints().AppendEmpty() matched, err := matcher.MatchMetric(m) assert.NoError(t, err) @@ -95,9 +96,9 @@ func TestSumEmptyDataPoint(t *testing.T) { func TestHistogramEmptyDataPoint(t *testing.T) { matcher, err := NewMatcher(`MetricName == 'my.metric'`) require.NoError(t, err) - m := pdata.NewMetric() + m := pmetric.NewMetric() m.SetName("my.metric") - m.SetDataType(pdata.MetricDataTypeHistogram) + m.SetDataType(pmetric.MetricDataTypeHistogram) m.Histogram().DataPoints().AppendEmpty() matched, err := matcher.MatchMetric(m) assert.NoError(t, err) @@ -109,9 +110,9 @@ func TestMatchIntGaugeDataPointByMetricAndSecondPointLabelValue(t *testing.T) { `MetricName == 'my.metric' && Label("baz") == "glarch"`, ) require.NoError(t, err) - m := pdata.NewMetric() + m := pmetric.NewMetric() m.SetName("my.metric") - m.SetDataType(pdata.MetricDataTypeGauge) + m.SetDataType(pmetric.MetricDataTypeGauge) dps := m.Gauge().DataPoints() dps.AppendEmpty().Attributes().InsertString("foo", "bar") @@ -155,13 +156,13 @@ func TestNonMatchGaugeDataPointByMetricAndLabelValue(t *testing.T) { func testMatchGauge(t *testing.T, metricName, expression string, lbls map[string]interface{}) bool { matcher, err := NewMatcher(expression) require.NoError(t, err) - m := pdata.NewMetric() + m := pmetric.NewMetric() m.SetName(metricName) - m.SetDataType(pdata.MetricDataTypeGauge) + m.SetDataType(pmetric.MetricDataTypeGauge) dps := m.Gauge().DataPoints() pt := 
dps.AppendEmpty() if lbls != nil { - pdata.NewMapFromRaw(lbls).CopyTo(pt.Attributes()) + pcommon.NewMapFromRaw(lbls).CopyTo(pt.Attributes()) } match, err := matcher.MatchMetric(m) assert.NoError(t, err) @@ -179,9 +180,9 @@ func TestNonMatchSumByMetricName(t *testing.T) { func matchSum(t *testing.T, metricName string) bool { matcher, err := NewMatcher(`MetricName == 'my.metric'`) require.NoError(t, err) - m := pdata.NewMetric() + m := pmetric.NewMetric() m.SetName(metricName) - m.SetDataType(pdata.MetricDataTypeSum) + m.SetDataType(pmetric.MetricDataTypeSum) dps := m.Sum().DataPoints() dps.AppendEmpty() matched, err := matcher.MatchMetric(m) @@ -200,9 +201,9 @@ func TestNonMatchHistogramByMetricName(t *testing.T) { func matchHistogram(t *testing.T, metricName string) bool { matcher, err := NewMatcher(`MetricName == 'my.metric'`) require.NoError(t, err) - m := pdata.NewMetric() + m := pmetric.NewMetric() m.SetName(metricName) - m.SetDataType(pdata.MetricDataTypeHistogram) + m.SetDataType(pmetric.MetricDataTypeHistogram) dps := m.Histogram().DataPoints() dps.AppendEmpty() matched, err := matcher.MatchMetric(m) diff --git a/internal/coreinternal/processor/filterhelper/filterhelper.go b/internal/coreinternal/processor/filterhelper/filterhelper.go index f623e6cbe1d1..0e86f42600f9 100644 --- a/internal/coreinternal/processor/filterhelper/filterhelper.go +++ b/internal/coreinternal/processor/filterhelper/filterhelper.go @@ -18,22 +18,22 @@ import ( "fmt" "github.com/spf13/cast" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" ) // NewAttributeValueRaw is used to convert the raw `value` from ActionKeyValue to the supported trace attribute values. // If error different than nil the return value is invalid. Calling any functions on the invalid value will cause a panic. 
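A hedged usage sketch, not part of this diff, of the pcommon.Value constructors and accessors that NewAttributeValueRaw (next hunk) now returns in place of the pdata ones; the literal values are arbitrary.

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pcommon"
)

func main() {
	// The pdata.NewValue* constructors and their accessors move to pcommon with the same shapes.
	i := pcommon.NewValueInt(123)
	d := pcommon.NewValueDouble(234.129312)
	s := pcommon.NewValueString("bob the builder")
	b := pcommon.NewValueBool(true)
	fmt.Println(i.IntVal(), d.DoubleVal(), s.StringVal(), b.BoolVal())
	fmt.Println(i.Type(), s.Type()) // reports the pcommon.ValueType of each value
}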
-func NewAttributeValueRaw(value interface{}) (pdata.Value, error) { +func NewAttributeValueRaw(value interface{}) (pcommon.Value, error) { switch val := value.(type) { case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: - return pdata.NewValueInt(cast.ToInt64(val)), nil + return pcommon.NewValueInt(cast.ToInt64(val)), nil case float32, float64: - return pdata.NewValueDouble(cast.ToFloat64(val)), nil + return pcommon.NewValueDouble(cast.ToFloat64(val)), nil case string: - return pdata.NewValueString(val), nil + return pcommon.NewValueString(val), nil case bool: - return pdata.NewValueBool(val), nil + return pcommon.NewValueBool(val), nil default: - return pdata.Value{}, fmt.Errorf("error unsupported value type \"%T\"", value) + return pcommon.Value{}, fmt.Errorf("error unsupported value type \"%T\"", value) } } diff --git a/internal/coreinternal/processor/filterhelper/filterhelper_test.go b/internal/coreinternal/processor/filterhelper/filterhelper_test.go index c37e4f5ccdf5..ad9d78d7069a 100644 --- a/internal/coreinternal/processor/filterhelper/filterhelper_test.go +++ b/internal/coreinternal/processor/filterhelper/filterhelper_test.go @@ -18,40 +18,40 @@ import ( "testing" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" ) func TestHelper_AttributeValue(t *testing.T) { val, err := NewAttributeValueRaw(uint8(123)) - assert.Equal(t, pdata.NewValueInt(123), val) + assert.Equal(t, pcommon.NewValueInt(123), val) assert.NoError(t, err) val, err = NewAttributeValueRaw(uint16(123)) - assert.Equal(t, pdata.NewValueInt(123), val) + assert.Equal(t, pcommon.NewValueInt(123), val) assert.NoError(t, err) val, err = NewAttributeValueRaw(int8(123)) - assert.Equal(t, pdata.NewValueInt(123), val) + assert.Equal(t, pcommon.NewValueInt(123), val) assert.NoError(t, err) val, err = NewAttributeValueRaw(int16(123)) - assert.Equal(t, pdata.NewValueInt(123), val) + assert.Equal(t, pcommon.NewValueInt(123), val) assert.NoError(t, err) val, err = NewAttributeValueRaw(float32(234.129312)) - assert.Equal(t, pdata.NewValueDouble(float64(float32(234.129312))), val) + assert.Equal(t, pcommon.NewValueDouble(float64(float32(234.129312))), val) assert.NoError(t, err) val, err = NewAttributeValueRaw(234.129312) - assert.Equal(t, pdata.NewValueDouble(234.129312), val) + assert.Equal(t, pcommon.NewValueDouble(234.129312), val) assert.NoError(t, err) val, err = NewAttributeValueRaw(true) - assert.Equal(t, pdata.NewValueBool(true), val) + assert.Equal(t, pcommon.NewValueBool(true), val) assert.NoError(t, err) val, err = NewAttributeValueRaw("bob the builder") - assert.Equal(t, pdata.NewValueString("bob the builder"), val) + assert.Equal(t, pcommon.NewValueString("bob the builder"), val) assert.NoError(t, err) _, err = NewAttributeValueRaw(nil) diff --git a/internal/coreinternal/processor/filterlog/filterlog.go b/internal/coreinternal/processor/filterlog/filterlog.go index 65cd246146b6..f63e551e9211 100644 --- a/internal/coreinternal/processor/filterlog/filterlog.go +++ b/internal/coreinternal/processor/filterlog/filterlog.go @@ -17,7 +17,8 @@ package filterlog // import "github.com/open-telemetry/opentelemetry-collector-c import ( "fmt" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/processor/filterconfig" 
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/processor/filtermatcher" @@ -29,7 +30,7 @@ import ( // TODO: Modify Matcher to invoke both the include and exclude properties so // calling processors will always have the same logic. type Matcher interface { - MatchLogRecord(lr pdata.LogRecord, resource pdata.Resource, library pdata.InstrumentationScope) bool + MatchLogRecord(lr plog.LogRecord, resource pcommon.Resource, library pcommon.InstrumentationScope) bool } // propertiesMatcher allows matching a log record against various log record properties. @@ -77,8 +78,8 @@ func NewMatcher(mp *filterconfig.MatchProperties) (Matcher, error) { // At least one of log record names or attributes must be specified. It is // supported to have more than one of these specified, and all specified must // evaluate to true for a match to occur. -func (mp *propertiesMatcher) MatchLogRecord(lr pdata.LogRecord, resource pdata.Resource, library pdata.InstrumentationScope) bool { - if lr.Body().Type() == pdata.ValueTypeString && mp.bodyFilters != nil && mp.bodyFilters.Matches(lr.Body().StringVal()) { +func (mp *propertiesMatcher) MatchLogRecord(lr plog.LogRecord, resource pcommon.Resource, library pcommon.InstrumentationScope) bool { + if lr.Body().Type() == pcommon.ValueTypeString && mp.bodyFilters != nil && mp.bodyFilters.Matches(lr.Body().StringVal()) { return true } diff --git a/internal/coreinternal/processor/filterlog/filterlog_test.go b/internal/coreinternal/processor/filterlog/filterlog_test.go index db6f8c243add..caf234f4e868 100644 --- a/internal/coreinternal/processor/filterlog/filterlog_test.go +++ b/internal/coreinternal/processor/filterlog/filterlog_test.go @@ -19,7 +19,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/processor/filterconfig" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/processor/filterset" @@ -116,14 +117,14 @@ func TestLogRecord_Matching_False(t *testing.T) { }, } - lr := pdata.NewLogRecord() + lr := plog.NewLogRecord() for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { matcher, err := NewMatcher(tc.properties) assert.Nil(t, err) require.NotNil(t, matcher) - assert.False(t, matcher.MatchLogRecord(lr, pdata.Resource{}, pdata.InstrumentationScope{})) + assert.False(t, matcher.MatchLogRecord(lr, pcommon.Resource{}, pcommon.InstrumentationScope{})) }) } } @@ -158,7 +159,7 @@ func TestLogRecord_Matching_True(t *testing.T) { }, } - lr := pdata.NewLogRecord() + lr := plog.NewLogRecord() lr.Attributes().InsertString("abc", "def") lr.Body().SetStringVal("AUTHENTICATION FAILED") @@ -169,7 +170,7 @@ func TestLogRecord_Matching_True(t *testing.T) { require.NotNil(t, mp) assert.NotNil(t, lr) - assert.True(t, mp.MatchLogRecord(lr, pdata.Resource{}, pdata.InstrumentationScope{})) + assert.True(t, mp.MatchLogRecord(lr, pcommon.Resource{}, pcommon.InstrumentationScope{})) }) } } diff --git a/internal/coreinternal/processor/filtermatcher/attributematcher.go b/internal/coreinternal/processor/filtermatcher/attributematcher.go index f8a2ac56a659..af0c5a81910b 100644 --- a/internal/coreinternal/processor/filtermatcher/attributematcher.go +++ b/internal/coreinternal/processor/filtermatcher/attributematcher.go @@ -19,7 +19,7 
@@ import ( "fmt" "strconv" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/processor/filterconfig" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/processor/filterhelper" @@ -32,7 +32,7 @@ type AttributesMatcher []AttributeMatcher type AttributeMatcher struct { Key string // If both AttributeValue and StringFilter are nil only check for key existence. - AttributeValue *pdata.Value + AttributeValue *pcommon.Value // StringFilter is needed to match against a regular expression StringFilter filterset.FilterSet } @@ -58,7 +58,7 @@ func NewAttributesMatcher(config filterset.Config, attributes []filterconfig.Att } if config.MatchType == filterset.Regexp { - if val.Type() != pdata.ValueTypeString { + if val.Type() != pcommon.ValueTypeString { return nil, fmt.Errorf( "%s=%s for %q only supports STRING, but found %s", filterset.MatchTypeFieldName, filterset.Regexp, attribute.Key, val.Type(), @@ -83,7 +83,7 @@ func NewAttributesMatcher(config filterset.Config, attributes []filterconfig.Att } // Match attributes specification against a span/log. -func (ma AttributesMatcher) Match(attrs pdata.Map) bool { +func (ma AttributesMatcher) Match(attrs pcommon.Map) bool { // If there are no attributes to match against, the span/log matches. if len(ma) == 0 { return true @@ -116,15 +116,15 @@ func (ma AttributesMatcher) Match(attrs pdata.Map) bool { return true } -func attributeStringValue(attr pdata.Value) (string, error) { +func attributeStringValue(attr pcommon.Value) (string, error) { switch attr.Type() { - case pdata.ValueTypeString: + case pcommon.ValueTypeString: return attr.StringVal(), nil - case pdata.ValueTypeBool: + case pcommon.ValueTypeBool: return strconv.FormatBool(attr.BoolVal()), nil - case pdata.ValueTypeDouble: + case pcommon.ValueTypeDouble: return strconv.FormatFloat(attr.DoubleVal(), 'f', -1, 64), nil - case pdata.ValueTypeInt: + case pcommon.ValueTypeInt: return strconv.FormatInt(attr.IntVal(), 10), nil default: return "", errUnexpectedAttributeType diff --git a/internal/coreinternal/processor/filtermatcher/filtermatcher.go b/internal/coreinternal/processor/filtermatcher/filtermatcher.go index a517ba9d1eb9..e018a2fc6c24 100644 --- a/internal/coreinternal/processor/filtermatcher/filtermatcher.go +++ b/internal/coreinternal/processor/filtermatcher/filtermatcher.go @@ -17,7 +17,7 @@ package filtermatcher // import "github.com/open-telemetry/opentelemetry-collect import ( "fmt" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/processor/filterconfig" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/processor/filterset" @@ -86,7 +86,7 @@ func NewMatcher(mp *filterconfig.MatchProperties) (PropertiesMatcher, error) { } // Match matches a span or log to a set of properties. 
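A small sketch, not in the diff, of the pcommon.Map API that the attribute matchers above now operate on; the key names are arbitrary, and the lookup mirrors the ValueType switch in attributeStringValue.

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pcommon"
)

func main() {
	// Build a map from raw Go values, then add and read entries; these calls
	// were previously on pdata.Map.
	attrs := pcommon.NewMapFromRaw(map[string]interface{}{
		"keyInt":    123,
		"keyString": "arithmetic",
	})
	attrs.UpsertString("keyExists", "present")
	if v, ok := attrs.Get("keyInt"); ok && v.Type() == pcommon.ValueTypeInt {
		fmt.Println("keyInt =", v.IntVal())
	}
	fmt.Println("entries:", attrs.Len())
}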
-func (mp *PropertiesMatcher) Match(attributes pdata.Map, resource pdata.Resource, library pdata.InstrumentationScope) bool { +func (mp *PropertiesMatcher) Match(attributes pcommon.Map, resource pcommon.Resource, library pcommon.InstrumentationScope) bool { for _, matcher := range mp.libraries { if !matcher.Name.Matches(library.Name()) { return false diff --git a/internal/coreinternal/processor/filtermatcher/filtermatcher_test.go b/internal/coreinternal/processor/filtermatcher/filtermatcher_test.go index e58d685cbcbb..b82eb0baaae6 100644 --- a/internal/coreinternal/processor/filtermatcher/filtermatcher_test.go +++ b/internal/coreinternal/processor/filtermatcher/filtermatcher_test.go @@ -19,8 +19,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/processor/filterconfig" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/processor/filterset" @@ -204,12 +204,12 @@ func Test_Matching_False(t *testing.T) { }, } - atts := pdata.NewMapFromRaw(map[string]interface{}{ + atts := pcommon.NewMapFromRaw(map[string]interface{}{ "keyInt": 123, "keyMap": map[string]interface{}{}, }) - library := pdata.NewInstrumentationScope() + library := pcommon.NewInstrumentationScope() library.SetName("lib") library.SetVersion("ver") @@ -239,7 +239,7 @@ func Test_MatchingCornerCases(t *testing.T) { assert.Nil(t, err) assert.NotNil(t, mp) - assert.False(t, mp.Match(pdata.NewMap(), resource("svcA"), pdata.NewInstrumentationScope())) + assert.False(t, mp.Match(pcommon.NewMap(), resource("svcA"), pcommon.NewInstrumentationScope())) } func Test_Matching_True(t *testing.T) { @@ -358,7 +358,7 @@ func Test_Matching_True(t *testing.T) { }, } - atts := pdata.NewMapFromRaw(map[string]interface{}{ + atts := pcommon.NewMapFromRaw(map[string]interface{}{ "keyString": "arithmetic", "keyInt": 123, "keyDouble": 3245.6, @@ -366,11 +366,11 @@ func Test_Matching_True(t *testing.T) { "keyExists": "present", }) - resource := pdata.NewResource() + resource := pcommon.NewResource() resource.Attributes().InsertString(conventions.AttributeServiceName, "svcA") resource.Attributes().InsertString("resString", "arithmetic") - library := pdata.NewInstrumentationScope() + library := pcommon.NewInstrumentationScope() library.SetName("lib") library.SetVersion("ver") @@ -385,8 +385,8 @@ func Test_Matching_True(t *testing.T) { } } -func resource(service string) pdata.Resource { - r := pdata.NewResource() +func resource(service string) pcommon.Resource { + r := pcommon.NewResource() r.Attributes().InsertString(conventions.AttributeServiceName, service) return r } diff --git a/internal/coreinternal/processor/filtermetric/config.go b/internal/coreinternal/processor/filtermetric/config.go index 59a70cb8e34a..725a2a66d1c6 100644 --- a/internal/coreinternal/processor/filtermetric/config.go +++ b/internal/coreinternal/processor/filtermetric/config.go @@ -20,15 +20,15 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/processor/filterset/regexp" ) -// MatchType specifies the strategy for matching against `pdata.Metric`s. This +// MatchType specifies the strategy for matching against `pmetric.Metric`s. 
This // is distinct from filterset.MatchType which matches against metric (and // tracing) names only. To support matching against metric names and -// `pdata.Metric`s, filtermetric.MatchType is effectively a superset of +// `pmetric.Metric`s, filtermetric.MatchType is effectively a superset of // filterset.MatchType. type MatchType string // These are the MatchTypes that users can specify for filtering -// `pdata.Metric`s. +// `pmetric.Metric`s. const ( Regexp = MatchType(filterset.Regexp) Strict = MatchType(filterset.Strict) diff --git a/internal/coreinternal/processor/filtermetric/expr_matcher.go b/internal/coreinternal/processor/filtermetric/expr_matcher.go index d39467315770..ce28ab993fa4 100644 --- a/internal/coreinternal/processor/filtermetric/expr_matcher.go +++ b/internal/coreinternal/processor/filtermetric/expr_matcher.go @@ -15,7 +15,7 @@ package filtermetric // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/processor/filtermetric" import ( - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/processor/filterexpr" ) @@ -36,7 +36,7 @@ func newExprMatcher(expressions []string) (*exprMatcher, error) { return m, nil } -func (m *exprMatcher) MatchMetric(metric pdata.Metric) (bool, error) { +func (m *exprMatcher) MatchMetric(metric pmetric.Metric) (bool, error) { for _, matcher := range m.matchers { matched, err := matcher.MatchMetric(metric) if err != nil { diff --git a/internal/coreinternal/processor/filtermetric/filtermetric.go b/internal/coreinternal/processor/filtermetric/filtermetric.go index b656f5331c96..73528bb615a8 100644 --- a/internal/coreinternal/processor/filtermetric/filtermetric.go +++ b/internal/coreinternal/processor/filtermetric/filtermetric.go @@ -15,12 +15,12 @@ package filtermetric // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/processor/filtermetric" import ( - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" ) type Matcher interface { - MatchMetric(metric pdata.Metric) (bool, error) + MatchMetric(metric pmetric.Metric) (bool, error) } // NewMatcher constructs a metric Matcher. If an 'expr' match type is specified, @@ -40,7 +40,7 @@ func NewMatcher(config *MatchProperties) (Matcher, error) { // The default is to not skip. If include is defined, the metric must match or it will be skipped. // If include is not defined but exclude is, metric will be skipped if it matches exclude. Metric // is included if neither specified. -func SkipMetric(include, exclude Matcher, metric pdata.Metric, logger *zap.Logger) bool { +func SkipMetric(include, exclude Matcher, metric pmetric.Metric, logger *zap.Logger) bool { if include != nil { // A false (or an error) returned in this case means the metric should not be processed. 
i, err := include.MatchMetric(metric) diff --git a/internal/coreinternal/processor/filtermetric/filtermetric_test.go b/internal/coreinternal/processor/filtermetric/filtermetric_test.go index d6319586a62a..a5b62ee89a7c 100644 --- a/internal/coreinternal/processor/filtermetric/filtermetric_test.go +++ b/internal/coreinternal/processor/filtermetric/filtermetric_test.go @@ -18,7 +18,7 @@ import ( "testing" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/processor/filterset" ) @@ -42,8 +42,8 @@ var ( } ) -func createMetric(name string) pdata.Metric { - metric := pdata.NewMetric() +func createMetric(name string) pmetric.Metric { + metric := pmetric.NewMetric() metric.SetName(name) return metric } @@ -52,7 +52,7 @@ func TestMatcherMatches(t *testing.T) { tests := []struct { name string cfg *MatchProperties - metric pdata.Metric + metric pmetric.Metric shouldMatch bool }{ { diff --git a/internal/coreinternal/processor/filtermetric/name_matcher.go b/internal/coreinternal/processor/filtermetric/name_matcher.go index 4b0efcd67909..91ec95eec3a5 100644 --- a/internal/coreinternal/processor/filtermetric/name_matcher.go +++ b/internal/coreinternal/processor/filtermetric/name_matcher.go @@ -15,7 +15,7 @@ package filtermetric // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/processor/filtermetric" import ( - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/processor/filterset" ) @@ -43,6 +43,6 @@ func newNameMatcher(config *MatchProperties) (*nameMatcher, error) { // MatchMetric matches a metric using the metric properties configured on the nameMatcher. // A metric only matches if every metric property configured on the nameMatcher is a match. -func (m *nameMatcher) MatchMetric(metric pdata.Metric) (bool, error) { +func (m *nameMatcher) MatchMetric(metric pmetric.Metric) (bool, error) { return m.nameFilters.Matches(metric.Name()), nil } diff --git a/internal/coreinternal/processor/filterspan/filterspan.go b/internal/coreinternal/processor/filterspan/filterspan.go index eb1ed585964a..846de9e5e68a 100644 --- a/internal/coreinternal/processor/filterspan/filterspan.go +++ b/internal/coreinternal/processor/filterspan/filterspan.go @@ -17,8 +17,9 @@ package filterspan // import "github.com/open-telemetry/opentelemetry-collector- import ( "fmt" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/processor/filterconfig" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/processor/filtermatcher" @@ -30,7 +31,7 @@ import ( // TODO: Modify Matcher to invoke both the include and exclude properties so // calling processors will always have the same logic. type Matcher interface { - MatchSpan(span pdata.Span, resource pdata.Resource, library pdata.InstrumentationScope) bool + MatchSpan(span ptrace.Span, resource pcommon.Resource, library pcommon.InstrumentationScope) bool } // propertiesMatcher allows matching a span against various span properties. 
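A sketch, not part of this diff, of how the per-signal inputs to the filterspan Matcher are now constructed: the span comes from ptrace, while the resource and instrumentation scope come from pcommon. It mirrors the filterspan_test.go setup further below; the names and attribute values are illustrative.

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/ptrace"
)

func main() {
	// These three values are what MatchSpan/SkipSpan (next hunks) receive.
	span := ptrace.NewSpan() // previously pdata.NewSpan()
	span.SetName("spanName")
	span.Attributes().InsertString("keyString", "arithmetic")

	resource := pcommon.NewResource()
	resource.Attributes().InsertString("service.name", "svcA") // conventions.AttributeServiceName

	library := pcommon.NewInstrumentationScope()
	library.SetName("lib")
	library.SetVersion("ver")

	fmt.Println(span.Name(), resource.Attributes().Len(), library.Name())
}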
@@ -88,7 +89,7 @@ func NewMatcher(mp *filterconfig.MatchProperties) (Matcher, error) { // The logic determining if a span should be processed is set // in the attribute configuration with the include and exclude settings. // Include properties are checked before exclude settings are checked. -func SkipSpan(include Matcher, exclude Matcher, span pdata.Span, resource pdata.Resource, library pdata.InstrumentationScope) bool { +func SkipSpan(include Matcher, exclude Matcher, span ptrace.Span, resource pcommon.Resource, library pcommon.InstrumentationScope) bool { if include != nil { // A false returned in this case means the span should not be processed. if i := include.MatchSpan(span, resource, library); !i { @@ -108,7 +109,7 @@ func SkipSpan(include Matcher, exclude Matcher, span pdata.Span, resource pdata. // MatchSpan matches a span and service to a set of properties. // see filterconfig.MatchProperties for more details -func (mp *propertiesMatcher) MatchSpan(span pdata.Span, resource pdata.Resource, library pdata.InstrumentationScope) bool { +func (mp *propertiesMatcher) MatchSpan(span ptrace.Span, resource pcommon.Resource, library pcommon.InstrumentationScope) bool { // If a set of properties was not in the mp, all spans are considered to match on that property if mp.serviceFilters != nil { serviceName := serviceNameForResource(resource) @@ -125,7 +126,7 @@ func (mp *propertiesMatcher) MatchSpan(span pdata.Span, resource pdata.Resource, } // serviceNameForResource gets the service name for a specified Resource. -func serviceNameForResource(resource pdata.Resource) string { +func serviceNameForResource(resource pcommon.Resource) string { service, found := resource.Attributes().Get(conventions.AttributeServiceName) if !found { return "" diff --git a/internal/coreinternal/processor/filterspan/filterspan_test.go b/internal/coreinternal/processor/filterspan/filterspan_test.go index 1893a655188c..f680f4ee9089 100644 --- a/internal/coreinternal/processor/filterspan/filterspan_test.go +++ b/internal/coreinternal/processor/filterspan/filterspan_test.go @@ -19,8 +19,9 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/processor/filterconfig" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/processor/filterset" @@ -145,10 +146,10 @@ func TestSpan_Matching_False(t *testing.T) { }, } - span := pdata.NewSpan() + span := ptrace.NewSpan() span.SetName("spanName") - library := pdata.NewInstrumentationScope() - resource := pdata.NewResource() + library := pcommon.NewInstrumentationScope() + resource := pcommon.NewResource() for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { @@ -171,8 +172,8 @@ func TestSpan_MissingServiceName(t *testing.T) { assert.Nil(t, err) assert.NotNil(t, mp) - emptySpan := pdata.NewSpan() - assert.False(t, mp.MatchSpan(emptySpan, pdata.NewResource(), pdata.NewInstrumentationScope())) + emptySpan := ptrace.NewSpan() + assert.False(t, mp.MatchSpan(emptySpan, pcommon.NewResource(), pcommon.NewInstrumentationScope())) } func TestSpan_Matching_True(t *testing.T) { @@ -219,7 +220,7 @@ func TestSpan_Matching_True(t *testing.T) { }, } - span := pdata.NewSpan() + span := ptrace.NewSpan() 
span.SetName("spanName") span.Attributes().InsertString("keyString", "arithmetic") span.Attributes().InsertInt("keyInt", 123) @@ -228,10 +229,10 @@ func TestSpan_Matching_True(t *testing.T) { span.Attributes().InsertString("keyExists", "present") assert.NotNil(t, span) - resource := pdata.NewResource() + resource := pcommon.NewResource() resource.Attributes().InsertString(conventions.AttributeServiceName, "svcA") - library := pdata.NewInstrumentationScope() + library := pcommon.NewInstrumentationScope() for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { diff --git a/internal/coreinternal/testdata/common.go b/internal/coreinternal/testdata/common.go index 46e62c11908b..7e4e20a798c6 100644 --- a/internal/coreinternal/testdata/common.go +++ b/internal/coreinternal/testdata/common.go @@ -14,9 +14,7 @@ package testdata -import ( - "go.opentelemetry.io/collector/model/pdata" -) +import "go.opentelemetry.io/collector/pdata/pcommon" var ( resourceAttributes1 = map[string]interface{}{"resource-attr": "resource-attr-val-1"} @@ -37,46 +35,46 @@ const ( TestAttachmentValue = "exemplar-attachment-value" ) -func initResourceAttributes1(dest pdata.Map) { - pdata.NewMapFromRaw(resourceAttributes1).CopyTo(dest) +func initResourceAttributes1(dest pcommon.Map) { + pcommon.NewMapFromRaw(resourceAttributes1).CopyTo(dest) } -func initResourceAttributes2(dest pdata.Map) { - pdata.NewMapFromRaw(resourceAttributes2).CopyTo(dest) +func initResourceAttributes2(dest pcommon.Map) { + pcommon.NewMapFromRaw(resourceAttributes2).CopyTo(dest) } -func initSpanAttributes(dest pdata.Map) { - pdata.NewMapFromRaw(spanAttributes).CopyTo(dest) +func initSpanAttributes(dest pcommon.Map) { + pcommon.NewMapFromRaw(spanAttributes).CopyTo(dest) } -func initSpanEventAttributes(dest pdata.Map) { - pdata.NewMapFromRaw(spanEventAttributes).CopyTo(dest) +func initSpanEventAttributes(dest pcommon.Map) { + pcommon.NewMapFromRaw(spanEventAttributes).CopyTo(dest) } -func initSpanLinkAttributes(dest pdata.Map) { - pdata.NewMapFromRaw(spanLinkAttributes).CopyTo(dest) +func initSpanLinkAttributes(dest pcommon.Map) { + pcommon.NewMapFromRaw(spanLinkAttributes).CopyTo(dest) } -func initMetricAttachment(dest pdata.Map) { +func initMetricAttachment(dest pcommon.Map) { dest.UpsertString(TestAttachmentKey, TestAttachmentValue) } -func initMetricAttributes1(dest pdata.Map) { +func initMetricAttributes1(dest pcommon.Map) { dest.UpsertString(TestLabelKey1, TestLabelValue1) } -func initMetricAttributes12(dest pdata.Map) { +func initMetricAttributes12(dest pcommon.Map) { dest.UpsertString(TestLabelKey1, TestLabelValue1) dest.UpsertString(TestLabelKey2, TestLabelValue2) dest.Sort() } -func initMetricAttributes13(dest pdata.Map) { +func initMetricAttributes13(dest pcommon.Map) { dest.UpsertString(TestLabelKey1, TestLabelValue1) dest.UpsertString(TestLabelKey3, TestLabelValue3) dest.Sort() } -func initMetricAttributes2(dest pdata.Map) { +func initMetricAttributes2(dest pcommon.Map) { dest.UpsertString(TestLabelKey2, TestLabelValue2) } diff --git a/internal/coreinternal/testdata/log.go b/internal/coreinternal/testdata/log.go index 242ec061e08e..96933738ed00 100644 --- a/internal/coreinternal/testdata/log.go +++ b/internal/coreinternal/testdata/log.go @@ -17,47 +17,48 @@ package testdata import ( "time" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" ) var ( TestLogTime = time.Date(2020, 2, 11, 20, 26, 13, 789, time.UTC) - TestLogTimestamp = 
pdata.NewTimestampFromTime(TestLogTime) + TestLogTimestamp = pcommon.NewTimestampFromTime(TestLogTime) ) -func GenerateLogsOneEmptyResourceLogs() pdata.Logs { - ld := pdata.NewLogs() +func GenerateLogsOneEmptyResourceLogs() plog.Logs { + ld := plog.NewLogs() ld.ResourceLogs().AppendEmpty() return ld } -func GenerateLogsNoLogRecords() pdata.Logs { +func GenerateLogsNoLogRecords() plog.Logs { ld := GenerateLogsOneEmptyResourceLogs() initResource1(ld.ResourceLogs().At(0).Resource()) return ld } -func GenerateLogsOneEmptyLogRecord() pdata.Logs { +func GenerateLogsOneEmptyLogRecord() plog.Logs { ld := GenerateLogsNoLogRecords() rs0 := ld.ResourceLogs().At(0) rs0.ScopeLogs().AppendEmpty().LogRecords().AppendEmpty() return ld } -func GenerateLogsOneLogRecordNoResource() pdata.Logs { +func GenerateLogsOneLogRecordNoResource() plog.Logs { ld := GenerateLogsOneEmptyResourceLogs() rs0 := ld.ResourceLogs().At(0) fillLogOne(rs0.ScopeLogs().AppendEmpty().LogRecords().AppendEmpty()) return ld } -func GenerateLogsOneLogRecord() pdata.Logs { +func GenerateLogsOneLogRecord() plog.Logs { ld := GenerateLogsOneEmptyLogRecord() fillLogOne(ld.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0)) return ld } -func GenerateLogsTwoLogRecordsSameResource() pdata.Logs { +func GenerateLogsTwoLogRecordsSameResource() plog.Logs { ld := GenerateLogsOneEmptyLogRecord() logs := ld.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords() fillLogOne(logs.At(0)) @@ -65,8 +66,8 @@ func GenerateLogsTwoLogRecordsSameResource() pdata.Logs { return ld } -func GenerateLogsTwoLogRecordsSameResourceOneDifferent() pdata.Logs { - ld := pdata.NewLogs() +func GenerateLogsTwoLogRecordsSameResourceOneDifferent() plog.Logs { + ld := plog.NewLogs() rl0 := ld.ResourceLogs().AppendEmpty() initResource1(rl0.Resource()) logs := rl0.ScopeLogs().AppendEmpty().LogRecords() @@ -77,14 +78,14 @@ func GenerateLogsTwoLogRecordsSameResourceOneDifferent() pdata.Logs { fillLogThree(rl1.ScopeLogs().AppendEmpty().LogRecords().AppendEmpty()) return ld } -func fillLogOne(log pdata.LogRecord) { +func fillLogOne(log plog.LogRecord) { log.SetName("logA") log.SetTimestamp(TestLogTimestamp) log.SetDroppedAttributesCount(1) - log.SetSeverityNumber(pdata.SeverityNumberINFO) + log.SetSeverityNumber(plog.SeverityNumberINFO) log.SetSeverityText("Info") - log.SetSpanID(pdata.NewSpanID([8]byte{0x01, 0x02, 0x04, 0x08})) - log.SetTraceID(pdata.NewTraceID([16]byte{0x08, 0x04, 0x02, 0x01})) + log.SetSpanID(pcommon.NewSpanID([8]byte{0x01, 0x02, 0x04, 0x08})) + log.SetTraceID(pcommon.NewTraceID([16]byte{0x08, 0x04, 0x02, 0x01})) attrs := log.Attributes() attrs.InsertString("app", "server") @@ -93,11 +94,11 @@ func fillLogOne(log pdata.LogRecord) { log.Body().SetStringVal("This is a log message") } -func fillLogTwo(log pdata.LogRecord) { +func fillLogTwo(log plog.LogRecord) { log.SetName("logB") log.SetTimestamp(TestLogTimestamp) log.SetDroppedAttributesCount(1) - log.SetSeverityNumber(pdata.SeverityNumberINFO) + log.SetSeverityNumber(plog.SeverityNumberINFO) log.SetSeverityText("Info") attrs := log.Attributes() @@ -107,22 +108,22 @@ func fillLogTwo(log pdata.LogRecord) { log.Body().SetStringVal("something happened") } -func fillLogThree(log pdata.LogRecord) { +func fillLogThree(log plog.LogRecord) { log.SetName("logC") log.SetTimestamp(TestLogTimestamp) log.SetDroppedAttributesCount(1) - log.SetSeverityNumber(pdata.SeverityNumberWARN) + log.SetSeverityNumber(plog.SeverityNumberWARN) log.SetSeverityText("Warning") log.Body().SetStringVal("something else happened") } -func 
GenerateLogsManyLogRecordsSameResource(count int) pdata.Logs { +func GenerateLogsManyLogRecordsSameResource(count int) plog.Logs { ld := GenerateLogsOneEmptyLogRecord() logs := ld.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords() logs.EnsureCapacity(count) for i := 0; i < count; i++ { - var l pdata.LogRecord + var l plog.LogRecord if i < logs.Len() { l = logs.At(i) } else { diff --git a/internal/coreinternal/testdata/metric.go b/internal/coreinternal/testdata/metric.go index fab80f8894e0..3b3381d2b7c6 100644 --- a/internal/coreinternal/testdata/metric.go +++ b/internal/coreinternal/testdata/metric.go @@ -17,18 +17,19 @@ package testdata import ( "time" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" ) var ( TestMetricStartTime = time.Date(2020, 2, 11, 20, 26, 12, 321, time.UTC) - TestMetricStartTimestamp = pdata.NewTimestampFromTime(TestMetricStartTime) + TestMetricStartTimestamp = pcommon.NewTimestampFromTime(TestMetricStartTime) TestMetricExemplarTime = time.Date(2020, 2, 11, 20, 26, 13, 123, time.UTC) - TestMetricExemplarTimestamp = pdata.NewTimestampFromTime(TestMetricExemplarTime) + TestMetricExemplarTimestamp = pcommon.NewTimestampFromTime(TestMetricExemplarTime) TestMetricTime = time.Date(2020, 2, 11, 20, 26, 13, 789, time.UTC) - TestMetricTimestamp = pdata.NewTimestampFromTime(TestMetricTime) + TestMetricTimestamp = pcommon.NewTimestampFromTime(TestMetricTime) ) const ( @@ -41,26 +42,26 @@ const ( TestExponentialHistogramMetricName = "exponential-histogram" ) -func GenerateMetricsOneEmptyResourceMetrics() pdata.Metrics { - md := pdata.NewMetrics() +func GenerateMetricsOneEmptyResourceMetrics() pmetric.Metrics { + md := pmetric.NewMetrics() md.ResourceMetrics().AppendEmpty() return md } -func GenerateMetricsNoLibraries() pdata.Metrics { +func GenerateMetricsNoLibraries() pmetric.Metrics { md := GenerateMetricsOneEmptyResourceMetrics() ms0 := md.ResourceMetrics().At(0) initResource1(ms0.Resource()) return md } -func GenerateMetricsOneEmptyInstrumentationLibrary() pdata.Metrics { +func GenerateMetricsOneEmptyInstrumentationLibrary() pmetric.Metrics { md := GenerateMetricsNoLibraries() md.ResourceMetrics().At(0).ScopeMetrics().AppendEmpty() return md } -func GenerateMetricsOneMetricNoResource() pdata.Metrics { +func GenerateMetricsOneMetricNoResource() pmetric.Metrics { md := GenerateMetricsOneEmptyResourceMetrics() rm0 := md.ResourceMetrics().At(0) rm0ils0 := rm0.ScopeMetrics().AppendEmpty() @@ -68,14 +69,14 @@ func GenerateMetricsOneMetricNoResource() pdata.Metrics { return md } -func GenerateMetricsOneMetric() pdata.Metrics { +func GenerateMetricsOneMetric() pmetric.Metrics { md := GenerateMetricsOneEmptyInstrumentationLibrary() rm0ils0 := md.ResourceMetrics().At(0).ScopeMetrics().At(0) initSumIntMetric(rm0ils0.Metrics().AppendEmpty()) return md } -func GenerateMetricsTwoMetrics() pdata.Metrics { +func GenerateMetricsTwoMetrics() pmetric.Metrics { md := GenerateMetricsOneEmptyInstrumentationLibrary() rm0ils0 := md.ResourceMetrics().At(0).ScopeMetrics().At(0) initSumIntMetric(rm0ils0.Metrics().AppendEmpty()) @@ -83,7 +84,7 @@ func GenerateMetricsTwoMetrics() pdata.Metrics { return md } -func GenerateMetricsOneCounterOneSummaryMetrics() pdata.Metrics { +func GenerateMetricsOneCounterOneSummaryMetrics() pmetric.Metrics { md := GenerateMetricsOneEmptyInstrumentationLibrary() rm0ils0 := md.ResourceMetrics().At(0).ScopeMetrics().At(0) initSumIntMetric(rm0ils0.Metrics().AppendEmpty()) @@ -91,64 
+92,64 @@ func GenerateMetricsOneCounterOneSummaryMetrics() pdata.Metrics { return md } -func GenerateMetricsOneMetricNoAttributes() pdata.Metrics { +func GenerateMetricsOneMetricNoAttributes() pmetric.Metrics { md := GenerateMetricsOneMetric() dps := md.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints() - pdata.NewMap().CopyTo(dps.At(0).Attributes()) - pdata.NewMap().CopyTo(dps.At(1).Attributes()) + pcommon.NewMap().CopyTo(dps.At(0).Attributes()) + pcommon.NewMap().CopyTo(dps.At(1).Attributes()) return md } -func GenerateMetricsAllTypesNoDataPoints() pdata.Metrics { +func GenerateMetricsAllTypesNoDataPoints() pmetric.Metrics { md := GenerateMetricsOneEmptyInstrumentationLibrary() ilm0 := md.ResourceMetrics().At(0).ScopeMetrics().At(0) ms := ilm0.Metrics() - initMetric(ms.AppendEmpty(), TestGaugeDoubleMetricName, pdata.MetricDataTypeGauge) - initMetric(ms.AppendEmpty(), TestGaugeIntMetricName, pdata.MetricDataTypeGauge) - initMetric(ms.AppendEmpty(), TestSumDoubleMetricName, pdata.MetricDataTypeSum) - initMetric(ms.AppendEmpty(), TestSumIntMetricName, pdata.MetricDataTypeSum) - initMetric(ms.AppendEmpty(), TestDoubleHistogramMetricName, pdata.MetricDataTypeHistogram) - initMetric(ms.AppendEmpty(), TestDoubleSummaryMetricName, pdata.MetricDataTypeSummary) + initMetric(ms.AppendEmpty(), TestGaugeDoubleMetricName, pmetric.MetricDataTypeGauge) + initMetric(ms.AppendEmpty(), TestGaugeIntMetricName, pmetric.MetricDataTypeGauge) + initMetric(ms.AppendEmpty(), TestSumDoubleMetricName, pmetric.MetricDataTypeSum) + initMetric(ms.AppendEmpty(), TestSumIntMetricName, pmetric.MetricDataTypeSum) + initMetric(ms.AppendEmpty(), TestDoubleHistogramMetricName, pmetric.MetricDataTypeHistogram) + initMetric(ms.AppendEmpty(), TestDoubleSummaryMetricName, pmetric.MetricDataTypeSummary) return md } -func GenerateMetricsAllTypesEmptyDataPoint() pdata.Metrics { +func GenerateMetricsAllTypesEmptyDataPoint() pmetric.Metrics { md := GenerateMetricsOneEmptyInstrumentationLibrary() ilm0 := md.ResourceMetrics().At(0).ScopeMetrics().At(0) ms := ilm0.Metrics() doubleGauge := ms.AppendEmpty() - initMetric(doubleGauge, TestGaugeDoubleMetricName, pdata.MetricDataTypeGauge) + initMetric(doubleGauge, TestGaugeDoubleMetricName, pmetric.MetricDataTypeGauge) doubleGauge.Gauge().DataPoints().AppendEmpty() intGauge := ms.AppendEmpty() - initMetric(intGauge, TestGaugeIntMetricName, pdata.MetricDataTypeGauge) + initMetric(intGauge, TestGaugeIntMetricName, pmetric.MetricDataTypeGauge) intGauge.Gauge().DataPoints().AppendEmpty() doubleSum := ms.AppendEmpty() - initMetric(doubleSum, TestSumDoubleMetricName, pdata.MetricDataTypeSum) + initMetric(doubleSum, TestSumDoubleMetricName, pmetric.MetricDataTypeSum) doubleSum.Sum().DataPoints().AppendEmpty() intSum := ms.AppendEmpty() - initMetric(intSum, TestSumIntMetricName, pdata.MetricDataTypeSum) + initMetric(intSum, TestSumIntMetricName, pmetric.MetricDataTypeSum) intSum.Sum().DataPoints().AppendEmpty() histogram := ms.AppendEmpty() - initMetric(histogram, TestDoubleHistogramMetricName, pdata.MetricDataTypeHistogram) + initMetric(histogram, TestDoubleHistogramMetricName, pmetric.MetricDataTypeHistogram) histogram.Histogram().DataPoints().AppendEmpty() summary := ms.AppendEmpty() - initMetric(summary, TestDoubleSummaryMetricName, pdata.MetricDataTypeSummary) + initMetric(summary, TestDoubleSummaryMetricName, pmetric.MetricDataTypeSummary) summary.Summary().DataPoints().AppendEmpty() exphist := ms.AppendEmpty() - initMetric(exphist, 
TestExponentialHistogramMetricName, pdata.MetricDataTypeExponentialHistogram) + initMetric(exphist, TestExponentialHistogramMetricName, pmetric.MetricDataTypeExponentialHistogram) exphist.ExponentialHistogram().DataPoints().AppendEmpty() return md } -func GenerateMetricsMetricTypeInvalid() pdata.Metrics { +func GenerateMetricsMetricTypeInvalid() pmetric.Metrics { md := GenerateMetricsOneEmptyInstrumentationLibrary() ilm0 := md.ResourceMetrics().At(0).ScopeMetrics().At(0) - initMetric(ilm0.Metrics().AppendEmpty(), TestSumIntMetricName, pdata.MetricDataTypeNone) + initMetric(ilm0.Metrics().AppendEmpty(), TestSumIntMetricName, pmetric.MetricDataTypeNone) return md } -func GeneratMetricsAllTypesWithSampleDatapoints() pdata.Metrics { +func GeneratMetricsAllTypesWithSampleDatapoints() pmetric.Metrics { md := GenerateMetricsOneEmptyInstrumentationLibrary() ilm := md.ResourceMetrics().At(0).ScopeMetrics().At(0) @@ -163,8 +164,8 @@ func GeneratMetricsAllTypesWithSampleDatapoints() pdata.Metrics { return md } -func initGaugeIntMetric(im pdata.Metric) { - initMetric(im, TestGaugeIntMetricName, pdata.MetricDataTypeGauge) +func initGaugeIntMetric(im pmetric.Metric) { + initMetric(im, TestGaugeIntMetricName, pmetric.MetricDataTypeGauge) idps := im.Gauge().DataPoints() idp0 := idps.AppendEmpty() @@ -179,8 +180,8 @@ func initGaugeIntMetric(im pdata.Metric) { idp1.SetIntVal(456) } -func initGaugeDoubleMetric(im pdata.Metric) { - initMetric(im, TestGaugeDoubleMetricName, pdata.MetricDataTypeGauge) +func initGaugeDoubleMetric(im pmetric.Metric) { + initMetric(im, TestGaugeDoubleMetricName, pmetric.MetricDataTypeGauge) idps := im.Gauge().DataPoints() idp0 := idps.AppendEmpty() @@ -195,8 +196,8 @@ func initGaugeDoubleMetric(im pdata.Metric) { idp1.SetDoubleVal(4.56) } -func initSumIntMetric(im pdata.Metric) { - initMetric(im, TestSumIntMetricName, pdata.MetricDataTypeSum) +func initSumIntMetric(im pmetric.Metric) { + initMetric(im, TestSumIntMetricName, pmetric.MetricDataTypeSum) idps := im.Sum().DataPoints() idp0 := idps.AppendEmpty() @@ -211,8 +212,8 @@ func initSumIntMetric(im pdata.Metric) { idp1.SetIntVal(456) } -func initSumDoubleMetric(dm pdata.Metric) { - initMetric(dm, TestSumDoubleMetricName, pdata.MetricDataTypeSum) +func initSumDoubleMetric(dm pmetric.Metric) { + initMetric(dm, TestSumDoubleMetricName, pmetric.MetricDataTypeSum) ddps := dm.Sum().DataPoints() ddp0 := ddps.AppendEmpty() @@ -228,8 +229,8 @@ func initSumDoubleMetric(dm pdata.Metric) { ddp1.SetDoubleVal(4.56) } -func initDoubleHistogramMetric(hm pdata.Metric) { - initMetric(hm, TestDoubleHistogramMetricName, pdata.MetricDataTypeHistogram) +func initDoubleHistogramMetric(hm pmetric.Metric) { + initMetric(hm, TestDoubleHistogramMetricName, pmetric.MetricDataTypeHistogram) hdps := hm.Histogram().DataPoints() hdp0 := hdps.AppendEmpty() @@ -252,8 +253,8 @@ func initDoubleHistogramMetric(hm pdata.Metric) { hdp1.SetExplicitBounds([]float64{1}) } -func initDoubleSummaryMetric(sm pdata.Metric) { - initMetric(sm, TestDoubleSummaryMetricName, pdata.MetricDataTypeSummary) +func initDoubleSummaryMetric(sm pmetric.Metric) { + initMetric(sm, TestDoubleSummaryMetricName, pmetric.MetricDataTypeSummary) sdps := sm.Summary().DataPoints() sdp0 := sdps.AppendEmpty() @@ -274,23 +275,23 @@ func initDoubleSummaryMetric(sm pdata.Metric) { quantile.SetValue(15) } -func initMetric(m pdata.Metric, name string, ty pdata.MetricDataType) { +func initMetric(m pmetric.Metric, name string, ty pmetric.MetricDataType) { m.SetName(name) m.SetDescription("") m.SetUnit("1") 
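// Note on the switch below: the data-type and temporality constants keep their names across
// the migration and only change package, e.g. pdata.MetricDataTypeGauge becomes
// pmetric.MetricDataTypeGauge and pdata.MetricAggregationTemporalityCumulative becomes
// pmetric.MetricAggregationTemporalityCumulative; the semantics of the constants are unchanged.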
m.SetDataType(ty) switch ty { - case pdata.MetricDataTypeSum: + case pmetric.MetricDataTypeSum: sum := m.Sum() sum.SetIsMonotonic(true) - sum.SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) - case pdata.MetricDataTypeHistogram: + sum.SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) + case pmetric.MetricDataTypeHistogram: histo := m.Histogram() - histo.SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + histo.SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) } } -func GenerateMetricsManyMetricsSameResource(metricsCount int) pdata.Metrics { +func GenerateMetricsManyMetricsSameResource(metricsCount int) pmetric.Metrics { md := GenerateMetricsOneEmptyInstrumentationLibrary() rs0ilm0 := md.ResourceMetrics().At(0).ScopeMetrics().At(0) rs0ilm0.Metrics().EnsureCapacity(metricsCount) diff --git a/internal/coreinternal/testdata/resource.go b/internal/coreinternal/testdata/resource.go index f5d2b8f726d2..6dd3d85c5cf5 100644 --- a/internal/coreinternal/testdata/resource.go +++ b/internal/coreinternal/testdata/resource.go @@ -14,14 +14,12 @@ package testdata -import ( - "go.opentelemetry.io/collector/model/pdata" -) +import "go.opentelemetry.io/collector/pdata/pcommon" -func initResource1(r pdata.Resource) { +func initResource1(r pcommon.Resource) { initResourceAttributes1(r.Attributes()) } -func initResource2(r pdata.Resource) { +func initResource2(r pcommon.Resource) { initResourceAttributes2(r.Attributes()) } diff --git a/internal/coreinternal/testdata/trace.go b/internal/coreinternal/testdata/trace.go index 2e53e969d5f0..54d5e91b7ed8 100644 --- a/internal/coreinternal/testdata/trace.go +++ b/internal/coreinternal/testdata/trace.go @@ -17,54 +17,55 @@ package testdata import ( "time" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" ) var ( TestSpanStartTime = time.Date(2020, 2, 11, 20, 26, 12, 321, time.UTC) - TestSpanStartTimestamp = pdata.NewTimestampFromTime(TestSpanStartTime) + TestSpanStartTimestamp = pcommon.NewTimestampFromTime(TestSpanStartTime) TestSpanEventTime = time.Date(2020, 2, 11, 20, 26, 13, 123, time.UTC) - TestSpanEventTimestamp = pdata.NewTimestampFromTime(TestSpanEventTime) + TestSpanEventTimestamp = pcommon.NewTimestampFromTime(TestSpanEventTime) TestSpanEndTime = time.Date(2020, 2, 11, 20, 26, 13, 789, time.UTC) - TestSpanEndTimestamp = pdata.NewTimestampFromTime(TestSpanEndTime) + TestSpanEndTimestamp = pcommon.NewTimestampFromTime(TestSpanEndTime) ) -func GenerateTracesOneEmptyResourceSpans() pdata.Traces { - td := pdata.NewTraces() +func GenerateTracesOneEmptyResourceSpans() ptrace.Traces { + td := ptrace.NewTraces() td.ResourceSpans().AppendEmpty() return td } -func GenerateTracesNoLibraries() pdata.Traces { +func GenerateTracesNoLibraries() ptrace.Traces { td := GenerateTracesOneEmptyResourceSpans() rs0 := td.ResourceSpans().At(0) initResource1(rs0.Resource()) return td } -func GenerateTracesOneEmptyInstrumentationLibrary() pdata.Traces { +func GenerateTracesOneEmptyInstrumentationLibrary() ptrace.Traces { td := GenerateTracesNoLibraries() td.ResourceSpans().At(0).ScopeSpans().AppendEmpty() return td } -func GenerateTracesOneSpanNoResource() pdata.Traces { +func GenerateTracesOneSpanNoResource() ptrace.Traces { td := GenerateTracesOneEmptyResourceSpans() rs0 := td.ResourceSpans().At(0) fillSpanOne(rs0.ScopeSpans().AppendEmpty().Spans().AppendEmpty()) return td } -func 
GenerateTracesOneSpan() pdata.Traces { +func GenerateTracesOneSpan() ptrace.Traces { td := GenerateTracesOneEmptyInstrumentationLibrary() rs0ils0 := td.ResourceSpans().At(0).ScopeSpans().At(0) fillSpanOne(rs0ils0.Spans().AppendEmpty()) return td } -func GenerateTracesTwoSpansSameResource() pdata.Traces { +func GenerateTracesTwoSpansSameResource() ptrace.Traces { td := GenerateTracesOneEmptyInstrumentationLibrary() rs0ils0 := td.ResourceSpans().At(0).ScopeSpans().At(0) fillSpanOne(rs0ils0.Spans().AppendEmpty()) @@ -72,8 +73,8 @@ func GenerateTracesTwoSpansSameResource() pdata.Traces { return td } -func GenerateTracesTwoSpansSameResourceOneDifferent() pdata.Traces { - td := pdata.NewTraces() +func GenerateTracesTwoSpansSameResourceOneDifferent() ptrace.Traces { + td := ptrace.NewTraces() rs0 := td.ResourceSpans().AppendEmpty() initResource1(rs0.Resource()) rs0ils0 := rs0.ScopeSpans().AppendEmpty() @@ -86,7 +87,7 @@ func GenerateTracesTwoSpansSameResourceOneDifferent() pdata.Traces { return td } -func GenerateTracesManySpansSameResource(spanCount int) pdata.Traces { +func GenerateTracesManySpansSameResource(spanCount int) ptrace.Traces { td := GenerateTracesOneEmptyInstrumentationLibrary() rs0ils0 := td.ResourceSpans().At(0).ScopeSpans().At(0) rs0ils0.Spans().EnsureCapacity(spanCount) @@ -96,7 +97,7 @@ func GenerateTracesManySpansSameResource(spanCount int) pdata.Traces { return td } -func fillSpanOne(span pdata.Span) { +func fillSpanOne(span ptrace.Span) { span.SetName("operationA") span.SetStartTimestamp(TestSpanStartTimestamp) span.SetEndTimestamp(TestSpanEndTimestamp) @@ -113,11 +114,11 @@ func fillSpanOne(span pdata.Span) { ev1.SetDroppedAttributesCount(2) span.SetDroppedEventsCount(1) status := span.Status() - status.SetCode(pdata.StatusCodeError) + status.SetCode(ptrace.StatusCodeError) status.SetMessage("status-cancelled") } -func fillSpanTwo(span pdata.Span) { +func fillSpanTwo(span ptrace.Span) { span.SetName("operationB") span.SetStartTimestamp(TestSpanStartTimestamp) span.SetEndTimestamp(TestSpanEndTimestamp) @@ -129,7 +130,7 @@ func fillSpanTwo(span pdata.Span) { span.SetDroppedLinksCount(3) } -func fillSpanThree(span pdata.Span) { +func fillSpanThree(span ptrace.Span) { span.SetName("operationC") span.SetStartTimestamp(TestSpanStartTimestamp) span.SetEndTimestamp(TestSpanEndTimestamp) diff --git a/internal/coreinternal/tracetranslator/protospan_translation.go b/internal/coreinternal/tracetranslator/protospan_translation.go index 8b6b29a3deae..38b9fd4612f8 100644 --- a/internal/coreinternal/tracetranslator/protospan_translation.go +++ b/internal/coreinternal/tracetranslator/protospan_translation.go @@ -15,7 +15,7 @@ package tracetranslator // import "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/tracetranslator" import ( - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/ptrace" ) // Some of the keys used to represent OTLP constructs as tags or annotations in other formats. 
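// A minimal sketch of the migrated trace-building pattern shown in the trace.go hunks above,
// using only APIs that appear in this patch (ptrace.NewTraces, pcommon.NewTimestampFromTime,
// the span setters); the package name, span name and timestamps are illustrative:
package example

import (
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/ptrace"
)

// buildOneSpan builds a Traces payload with a single named span, mirroring fillSpanOne above.
func buildOneSpan() ptrace.Traces {
	td := ptrace.NewTraces()
	span := td.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty().Spans().AppendEmpty()
	span.SetName("operationA")
	span.SetStartTimestamp(pcommon.NewTimestampFromTime(time.Now()))
	span.SetEndTimestamp(pcommon.NewTimestampFromTime(time.Now().Add(time.Second)))
	return td
}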
@@ -53,9 +53,9 @@ const ( // StatusCodeFromHTTP takes an HTTP status code and return the appropriate OpenTelemetry status code // See: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/http.md#status -func StatusCodeFromHTTP(httpStatusCode int64) pdata.StatusCode { +func StatusCodeFromHTTP(httpStatusCode int64) ptrace.StatusCode { if httpStatusCode >= 100 && httpStatusCode < 399 { - return pdata.StatusCodeUnset + return ptrace.StatusCodeUnset } - return pdata.StatusCodeError + return ptrace.StatusCodeError } diff --git a/internal/docker/go.mod b/internal/docker/go.mod index 0165b0e55214..63abeae0b3b2 100644 --- a/internal/docker/go.mod +++ b/internal/docker/go.mod @@ -36,3 +36,5 @@ require ( google.golang.org/protobuf v1.28.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/internal/k8sconfig/go.mod b/internal/k8sconfig/go.mod index ba2a1668380d..d0801cac58ff 100644 --- a/internal/k8sconfig/go.mod +++ b/internal/k8sconfig/go.mod @@ -41,3 +41,5 @@ require ( sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect sigs.k8s.io/yaml v1.2.0 // indirect ) + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/internal/kubelet/go.mod b/internal/kubelet/go.mod index 7b547c28aa2f..fba7f945c22d 100644 --- a/internal/kubelet/go.mod +++ b/internal/kubelet/go.mod @@ -6,7 +6,7 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.48.0 github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.48.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d go.uber.org/zap v1.21.0 ) @@ -30,10 +30,10 @@ require ( github.com/spf13/pflag v1.0.5 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect - golang.org/x/net v0.0.0-20211209124913-491a49abca63 // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect - golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b // indirect + golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 // indirect + golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect golang.org/x/text v0.3.7 // indirect golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect google.golang.org/appengine v1.6.7 // indirect @@ -55,3 +55,5 @@ require ( replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/common => ../../internal/common replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig => ../../internal/k8sconfig + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/internal/kubelet/go.sum b/internal/kubelet/go.sum index 4bbeb2a6929f..0aede13c1e64 100644 --- a/internal/kubelet/go.sum +++ b/internal/kubelet/go.sum @@ -277,8 +277,8 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= 
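// A small usage sketch for the StatusCodeFromHTTP change earlier in this patch; the helper,
// ptrace.StatusCodeError and the span Status() setters are taken from the diff, while the
// package name, function name and status message are illustrative:
package example

import (
	"go.opentelemetry.io/collector/pdata/ptrace"

	"github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/tracetranslator"
)

// setStatusFromHTTP maps an HTTP response code onto the span status using the migrated helper.
func setStatusFromHTTP(span ptrace.Span, httpStatus int64) {
	if code := tracetranslator.StatusCodeFromHTTP(httpStatus); code == ptrace.StatusCodeError {
		status := span.Status()
		status.SetCode(code)
		status.SetMessage("HTTP error")
	}
}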
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -372,8 +372,9 @@ golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20211209124913-491a49abca63 h1:iocB37TsdFuN6IBRZ+ry36wrkoV51/tl5vOWqkcPGvY= golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -448,13 +449,14 @@ golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b h1:9zKuko04nR4gjZ4+DNjHqRlAJqbJETHwiNKDqTfOjfE= golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/internal/scrapertest/compare.go b/internal/scrapertest/compare.go index f028ff895547..25f319e03eb4 100644 --- a/internal/scrapertest/compare.go +++ b/internal/scrapertest/compare.go @@ -18,17 +18,17 @@ import ( "fmt" "reflect" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/multierr" ) // CompareOption is applied by the CompareMetricSlices function // to mutates an expected and/or actual result before comparing. type CompareOption interface { - apply(expected, actual pdata.Metrics) + apply(expected, actual pmetric.Metrics) } -func CompareMetrics(expected, actual pdata.Metrics, options ...CompareOption) error { +func CompareMetrics(expected, actual pmetric.Metrics, options ...CompareOption) error { expected, actual = expected.Clone(), actual.Clone() for _, option := range options { @@ -43,7 +43,7 @@ func CompareMetrics(expected, actual pdata.Metrics, options ...CompareOption) er numResources := expectedMetrics.Len() // Keep track of matching resources so that each can only be matched once - matchingResources := make(map[pdata.ResourceMetrics]pdata.ResourceMetrics, numResources) + matchingResources := make(map[pmetric.ResourceMetrics]pmetric.ResourceMetrics, numResources) var errs error for e := 0; e < numResources; e++ { @@ -85,7 +85,7 @@ func CompareMetrics(expected, actual pdata.Metrics, options ...CompareOption) er return errs } -func CompareResourceMetrics(expected, actual pdata.ResourceMetrics) error { +func CompareResourceMetrics(expected, actual pmetric.ResourceMetrics) error { eilms := expected.ScopeMetrics() ailms := actual.ScopeMetrics() @@ -117,7 +117,7 @@ func CompareResourceMetrics(expected, actual pdata.ResourceMetrics) error { // CompareMetricSlices compares each part of two given MetricSlices and returns // an error if they don't match. The error describes what didn't match. The // expected and actual values are clones before options are applied. 
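// A hedged sketch of how the migrated comparison helpers above could be exercised in a scraper
// test file; CompareMetrics, IgnoreMetricValues and golden.ReadMetrics all come from this patch,
// while the package name, test name and the two testdata paths are placeholders:
package example

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/open-telemetry/opentelemetry-collector-contrib/internal/scrapertest"
	"github.com/open-telemetry/opentelemetry-collector-contrib/internal/scrapertest/golden"
)

func TestCompareScrapedMetrics(t *testing.T) {
	expected, err := golden.ReadMetrics("testdata/expected.json") // placeholder path
	require.NoError(t, err)
	actual, err := golden.ReadMetrics("testdata/actual.json") // placeholder path
	require.NoError(t, err)

	// Values are masked, so the comparison checks names, attributes and structure only.
	require.NoError(t, scrapertest.CompareMetrics(expected, actual, scrapertest.IgnoreMetricValues()))
}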
-func CompareMetricSlices(expected, actual pdata.MetricSlice) error { +func CompareMetricSlices(expected, actual pmetric.MetricSlice) error { if expected.Len() != actual.Len() { return fmt.Errorf("metric slices not of same length") } @@ -154,14 +154,14 @@ func CompareMetricSlices(expected, actual pdata.MetricSlice) error { return fmt.Errorf("metric DataType does not match expected: %s, actual: %s", expectedMetric.DataType(), actualMetric.DataType()) } - var expectedDataPoints pdata.NumberDataPointSlice - var actualDataPoints pdata.NumberDataPointSlice + var expectedDataPoints pmetric.NumberDataPointSlice + var actualDataPoints pmetric.NumberDataPointSlice switch actualMetric.DataType() { - case pdata.MetricDataTypeGauge: + case pmetric.MetricDataTypeGauge: expectedDataPoints = expectedMetric.Gauge().DataPoints() actualDataPoints = actualMetric.Gauge().DataPoints() - case pdata.MetricDataTypeSum: + case pmetric.MetricDataTypeSum: if actualMetric.Sum().AggregationTemporality() != expectedMetric.Sum().AggregationTemporality() { return fmt.Errorf("metric AggregationTemporality does not match expected: %s, actual: %s", expectedMetric.Sum().AggregationTemporality(), actualMetric.Sum().AggregationTemporality()) } @@ -181,7 +181,7 @@ func CompareMetricSlices(expected, actual pdata.MetricSlice) error { // CompareNumberDataPointSlices compares each part of two given NumberDataPointSlices and returns // an error if they don't match. The error describes what didn't match. -func CompareNumberDataPointSlices(expected, actual pdata.NumberDataPointSlice) error { +func CompareNumberDataPointSlices(expected, actual pmetric.NumberDataPointSlice) error { if expected.Len() != actual.Len() { return fmt.Errorf("length of datapoints don't match") } @@ -189,7 +189,7 @@ func CompareNumberDataPointSlices(expected, actual pdata.NumberDataPointSlice) e numPoints := expected.Len() // Keep track of matching data points so that each point can only be matched once - matchingDPS := make(map[pdata.NumberDataPoint]pdata.NumberDataPoint, numPoints) + matchingDPS := make(map[pmetric.NumberDataPoint]pmetric.NumberDataPoint, numPoints) var errs error for e := 0; e < numPoints; e++ { @@ -232,7 +232,7 @@ func CompareNumberDataPointSlices(expected, actual pdata.NumberDataPointSlice) e // CompareNumberDataPoints compares each part of two given NumberDataPoints and returns // an error if they don't match. The error describes what didn't match. 
-func CompareNumberDataPoints(expected, actual pdata.NumberDataPoint) error { +func CompareNumberDataPoints(expected, actual pmetric.NumberDataPoint) error { if expected.ValueType() != actual.ValueType() { return fmt.Errorf("metric datapoint types don't match: expected type: %s, actual type: %s", numberTypeToString(expected.ValueType()), numberTypeToString(actual.ValueType())) } @@ -245,11 +245,11 @@ func CompareNumberDataPoints(expected, actual pdata.NumberDataPoint) error { return nil } -func numberTypeToString(t pdata.MetricValueType) string { +func numberTypeToString(t pmetric.MetricValueType) string { switch t { - case pdata.MetricValueTypeInt: + case pmetric.MetricValueTypeInt: return "int" - case pdata.MetricValueTypeDouble: + case pmetric.MetricValueTypeDouble: return "double" default: return "none" diff --git a/internal/scrapertest/go.mod b/internal/scrapertest/go.mod index 0249745ecbaf..2aeb63339675 100644 --- a/internal/scrapertest/go.mod +++ b/internal/scrapertest/go.mod @@ -4,25 +4,20 @@ go 1.17 require ( github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/multierr v1.8.0 ) require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/protobuf v1.5.2 // indirect github.com/kr/pretty v0.3.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect go.uber.org/atomic v1.7.0 // indirect - golang.org/x/net v0.0.0-20201021035429-f5854403a974 // indirect - golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f // indirect - golang.org/x/text v0.3.3 // indirect - google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 // indirect - google.golang.org/grpc v1.45.0 // indirect - google.golang.org/protobuf v1.28.0 // indirect gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/scraperhelper => ../../receiver/scraperhelper + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/internal/scrapertest/go.sum b/internal/scrapertest/go.sum index 41b0b82e05ff..ea09475e6698 100644 --- a/internal/scrapertest/go.sum +++ b/internal/scrapertest/go.sum @@ -1,54 +1,9 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go 
v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod 
h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -60,21 +15,17 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= @@ -82,89 +33,33 @@ go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95a golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod 
h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod 
h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/internal/scrapertest/golden/golden.go b/internal/scrapertest/golden/golden.go index 8f62449ca8cb..97f686e2eff3 100644 --- a/internal/scrapertest/golden/golden.go +++ b/internal/scrapertest/golden/golden.go @@ -18,23 +18,22 @@ import ( "encoding/json" "io/ioutil" - "go.opentelemetry.io/collector/model/otlp" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" ) -// ReadMetrics reads a pdata.Metrics from the specified file -func ReadMetrics(filePath string) (pdata.Metrics, error) { +// ReadMetrics reads a pmetric.Metrics from the specified file +func ReadMetrics(filePath string) (pmetric.Metrics, error) { expectedFileBytes, err := ioutil.ReadFile(filePath) if err != nil { - return pdata.Metrics{}, err + return pmetric.Metrics{}, err } - unmarshaller := otlp.NewJSONMetricsUnmarshaler() + unmarshaller := pmetric.NewJSONUnmarshaler() return unmarshaller.UnmarshalMetrics(expectedFileBytes) } -// WriteMetrics writes a pdata.Metrics to the specified file -func WriteMetrics(filePath string, metrics pdata.Metrics) error { - bytes, err := otlp.NewJSONMetricsMarshaler().MarshalMetrics(metrics) +// WriteMetrics writes a pmetric.Metrics to the specified file +func WriteMetrics(filePath string, metrics pmetric.Metrics) error { + bytes, err := pmetric.NewJSONMarshaler().MarshalMetrics(metrics) if err != nil { return err } diff --git a/internal/scrapertest/golden/golden_test.go b/internal/scrapertest/golden/golden_test.go index 8865bafb11d6..3c5d023f9fdf 100644 --- 
a/internal/scrapertest/golden/golden_test.go +++ b/internal/scrapertest/golden/golden_test.go @@ -21,12 +21,13 @@ import ( "time" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" ) func TestWriteMetrics(t *testing.T) { metricslice := testMetrics() - metrics := pdata.NewMetrics() + metrics := pmetric.NewMetrics() metricslice.CopyTo(metrics.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics()) tempDir := filepath.Join(t.TempDir(), "metrics.json") @@ -44,7 +45,7 @@ func TestWriteMetrics(t *testing.T) { func TestReadMetrics(t *testing.T) { metricslice := testMetrics() - expectedMetrics := pdata.NewMetrics() + expectedMetrics := pmetric.NewMetrics() metricslice.CopyTo(expectedMetrics.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics()) expectedFile := filepath.Join("testdata", "roundtrip", "expected.json") @@ -55,7 +56,7 @@ func TestReadMetrics(t *testing.T) { func TestRoundTrip(t *testing.T) { metricslice := testMetrics() - expectedMetrics := pdata.NewMetrics() + expectedMetrics := pmetric.NewMetrics() metricslice.CopyTo(expectedMetrics.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics()) tempDir := filepath.Join(t.TempDir(), "metrics.json") @@ -67,8 +68,8 @@ func TestRoundTrip(t *testing.T) { require.Equal(t, expectedMetrics, actualMetrics) } -func testMetrics() pdata.MetricSlice { - slice := pdata.NewMetricSlice() +func testMetrics() pmetric.MetricSlice { + slice := pmetric.NewMetricSlice() // Gauge with two double dps metric := slice.AppendEmpty() @@ -76,15 +77,15 @@ func testMetrics() pdata.MetricSlice { dps := metric.Gauge().DataPoints() dp := dps.AppendEmpty() - attributes := pdata.NewMap() - attributes.Insert("testKey1", pdata.NewValueString("teststringvalue1")) - attributes.Insert("testKey2", pdata.NewValueString("testvalue1")) + attributes := pcommon.NewMap() + attributes.Insert("testKey1", pcommon.NewValueString("teststringvalue1")) + attributes.Insert("testKey2", pcommon.NewValueString("testvalue1")) setDPDoubleVal(dp, 2, attributes, time.Time{}) dp = dps.AppendEmpty() - attributes = pdata.NewMap() - attributes.Insert("testKey1", pdata.NewValueString("teststringvalue2")) - attributes.Insert("testKey2", pdata.NewValueString("testvalue2")) + attributes = pcommon.NewMap() + attributes.Insert("testKey1", pcommon.NewValueString("teststringvalue2")) + attributes.Insert("testKey2", pcommon.NewValueString("testvalue2")) setDPDoubleVal(dp, 2, attributes, time.Time{}) // Gauge with one int dp @@ -93,57 +94,57 @@ func testMetrics() pdata.MetricSlice { dps = metric.Gauge().DataPoints() dp = dps.AppendEmpty() - attributes = pdata.NewMap() - attributes.Insert("testKey2", pdata.NewValueString("teststringvalue2")) + attributes = pcommon.NewMap() + attributes.Insert("testKey2", pcommon.NewValueString("teststringvalue2")) setDPIntVal(dp, 2, attributes, time.Time{}) // Delta Sum with two int dps metric = slice.AppendEmpty() - initSum(metric, "test delta sum multi", "multi sum", "s", pdata.MetricAggregationTemporalityDelta, false) + initSum(metric, "test delta sum multi", "multi sum", "s", pmetric.MetricAggregationTemporalityDelta, false) dps = metric.Sum().DataPoints() dp = dps.AppendEmpty() - attributes = pdata.NewMap() - attributes.Insert("testKey2", pdata.NewValueString("teststringvalue2")) + attributes = pcommon.NewMap() + attributes.Insert("testKey2", pcommon.NewValueString("teststringvalue2")) 
setDPIntVal(dp, 2, attributes, time.Time{}) dp = dps.AppendEmpty() - attributes = pdata.NewMap() - attributes.Insert("testKey2", pdata.NewValueString("teststringvalue2")) + attributes = pcommon.NewMap() + attributes.Insert("testKey2", pcommon.NewValueString("teststringvalue2")) setDPIntVal(dp, 2, attributes, time.Time{}) // Cumulative Sum with one double dp metric = slice.AppendEmpty() - initSum(metric, "test cumulative sum single", "single sum", "1/s", pdata.MetricAggregationTemporalityCumulative, true) + initSum(metric, "test cumulative sum single", "single sum", "1/s", pmetric.MetricAggregationTemporalityCumulative, true) dps = metric.Sum().DataPoints() dp = dps.AppendEmpty() - attributes = pdata.NewMap() + attributes = pcommon.NewMap() setDPDoubleVal(dp, 2, attributes, time.Date(1997, 07, 27, 1, 1, 1, 1, &time.Location{})) return slice } -func setDPDoubleVal(dp pdata.NumberDataPoint, value float64, attributes pdata.Map, timeStamp time.Time) { +func setDPDoubleVal(dp pmetric.NumberDataPoint, value float64, attributes pcommon.Map, timeStamp time.Time) { dp.SetDoubleVal(value) - dp.SetTimestamp(pdata.NewTimestampFromTime(timeStamp)) + dp.SetTimestamp(pcommon.NewTimestampFromTime(timeStamp)) attributes.CopyTo(dp.Attributes()) } -func setDPIntVal(dp pdata.NumberDataPoint, value int64, attributes pdata.Map, timeStamp time.Time) { +func setDPIntVal(dp pmetric.NumberDataPoint, value int64, attributes pcommon.Map, timeStamp time.Time) { dp.SetIntVal(value) - dp.SetTimestamp(pdata.NewTimestampFromTime(timeStamp)) + dp.SetTimestamp(pcommon.NewTimestampFromTime(timeStamp)) attributes.CopyTo(dp.Attributes()) } -func initGauge(metric pdata.Metric, name, desc, unit string) { - metric.SetDataType(pdata.MetricDataTypeGauge) +func initGauge(metric pmetric.Metric, name, desc, unit string) { + metric.SetDataType(pmetric.MetricDataTypeGauge) metric.SetDescription(desc) metric.SetName(name) metric.SetUnit(unit) } -func initSum(metric pdata.Metric, name, desc, unit string, aggr pdata.MetricAggregationTemporality, isMonotonic bool) { - metric.SetDataType(pdata.MetricDataTypeSum) +func initSum(metric pmetric.Metric, name, desc, unit string, aggr pmetric.MetricAggregationTemporality, isMonotonic bool) { + metric.SetDataType(pmetric.MetricDataTypeSum) metric.Sum().SetIsMonotonic(isMonotonic) metric.Sum().SetAggregationTemporality(aggr) metric.SetDescription(desc) diff --git a/internal/scrapertest/mask.go b/internal/scrapertest/mask.go index af3e9268d399..15f370e5619e 100644 --- a/internal/scrapertest/mask.go +++ b/internal/scrapertest/mask.go @@ -17,7 +17,8 @@ package scrapertest // import "github.com/open-telemetry/opentelemetry-collector import ( "fmt" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" ) // IgnoreMetricValues is a CompareOption that clears all values @@ -27,12 +28,12 @@ func IgnoreMetricValues() CompareOption { type ignoreMetricValues struct{} -func (opt ignoreMetricValues) apply(expected, actual pdata.Metrics) { +func (opt ignoreMetricValues) apply(expected, actual pmetric.Metrics) { maskMetricValues(expected) maskMetricValues(actual) } -func maskMetricValues(metrics pdata.Metrics) { +func maskMetricValues(metrics pmetric.Metrics) { rms := metrics.ResourceMetrics() for i := 0; i < rms.Len(); i++ { ilms := rms.At(i).ScopeMetrics() @@ -43,14 +44,14 @@ func maskMetricValues(metrics pdata.Metrics) { } // maskMetricSliceValues sets all data point values to zero. 
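// A minimal JSON round-trip sketch for the marshaler rename in golden.go above, assuming only
// pmetric.NewJSONMarshaler and pmetric.NewJSONUnmarshaler as they appear in this patch; the
// package name, function name and metric name are illustrative:
package example

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

// roundTrip marshals a one-metric payload to JSON and reads it back.
func roundTrip() error {
	md := pmetric.NewMetrics()
	md.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics().AppendEmpty().SetName("example.metric")

	raw, err := pmetric.NewJSONMarshaler().MarshalMetrics(md)
	if err != nil {
		return err
	}
	back, err := pmetric.NewJSONUnmarshaler().UnmarshalMetrics(raw)
	if err != nil {
		return err
	}
	fmt.Println(back.ResourceMetrics().Len()) // 1
	return nil
}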
-func maskMetricSliceValues(metrics pdata.MetricSlice) { +func maskMetricSliceValues(metrics pmetric.MetricSlice) { for i := 0; i < metrics.Len(); i++ { maskDataPointSliceValues(getDataPointSlice(metrics.At(i))) } } // maskDataPointSliceValues sets all data point values to zero. -func maskDataPointSliceValues(dataPoints pdata.NumberDataPointSlice) { +func maskDataPointSliceValues(dataPoints pmetric.NumberDataPointSlice) { for i := 0; i < dataPoints.Len(); i++ { dataPoint := dataPoints.At(i) dataPoint.SetIntVal(0) @@ -71,12 +72,12 @@ type ignoreMetricAttributeValue struct { metricNames []string } -func (opt ignoreMetricAttributeValue) apply(expected, actual pdata.Metrics) { +func (opt ignoreMetricAttributeValue) apply(expected, actual pmetric.Metrics) { maskMetricAttributeValue(expected, opt) maskMetricAttributeValue(actual, opt) } -func maskMetricAttributeValue(metrics pdata.Metrics, opt ignoreMetricAttributeValue) { +func maskMetricAttributeValue(metrics pmetric.Metrics, opt ignoreMetricAttributeValue) { rms := metrics.ResourceMetrics() for i := 0; i < rms.Len(); i++ { ilms := rms.At(i).ScopeMetrics() @@ -90,7 +91,7 @@ func maskMetricAttributeValue(metrics pdata.Metrics, opt ignoreMetricAttributeVa // the zero value associated with the attribute data type. // If metric names are specified, only the data points within those metrics will be masked. // Otherwise, all data points with the attribute will be masked. -func maskMetricSliceAttributeValues(metrics pdata.MetricSlice, attributeName string, metricNames ...string) { +func maskMetricSliceAttributeValues(metrics pmetric.MetricSlice, attributeName string, metricNames ...string) { metricNameSet := make(map[string]bool, len(metricNames)) for _, metricName := range metricNames { metricNameSet[metricName] = true @@ -104,7 +105,7 @@ func maskMetricSliceAttributeValues(metrics pdata.MetricSlice, attributeName str // If attribute values are ignored, some data points may become // indistinguishable from each other, but sorting by value allows // for a reasonably thorough comparison and a deterministic outcome. - dps.Sort(func(a, b pdata.NumberDataPoint) bool { + dps.Sort(func(a, b pmetric.NumberDataPoint) bool { if a.IntVal() < b.IntVal() { return true } @@ -119,13 +120,13 @@ func maskMetricSliceAttributeValues(metrics pdata.MetricSlice, attributeName str // maskDataPointSliceAttributeValues sets the value of the specified attribute to // the zero value associated with the attribute data type. 
-func maskDataPointSliceAttributeValues(dataPoints pdata.NumberDataPointSlice, attributeName string) { +func maskDataPointSliceAttributeValues(dataPoints pmetric.NumberDataPointSlice, attributeName string) { for i := 0; i < dataPoints.Len(); i++ { attributes := dataPoints.At(i).Attributes() attribute, ok := attributes.Get(attributeName) if ok { switch attribute.Type() { - case pdata.ValueTypeString: + case pcommon.ValueTypeString: attributes.UpdateString(attributeName, "") default: panic(fmt.Sprintf("data type not supported: %s", attribute.Type())) diff --git a/internal/scrapertest/util.go b/internal/scrapertest/util.go index 9895cb7ac829..f579fddc829b 100644 --- a/internal/scrapertest/util.go +++ b/internal/scrapertest/util.go @@ -17,11 +17,11 @@ package scrapertest // import "github.com/open-telemetry/opentelemetry-collector import ( "fmt" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" ) -func metricsByName(metricSlice pdata.MetricSlice) map[string]pdata.Metric { - byName := make(map[string]pdata.Metric, metricSlice.Len()) +func metricsByName(metricSlice pmetric.MetricSlice) map[string]pmetric.Metric { + byName := make(map[string]pmetric.Metric, metricSlice.Len()) for i := 0; i < metricSlice.Len(); i++ { a := metricSlice.At(i) byName[a.Name()] = a @@ -29,12 +29,12 @@ func metricsByName(metricSlice pdata.MetricSlice) map[string]pdata.Metric { return byName } -func getDataPointSlice(metric pdata.Metric) pdata.NumberDataPointSlice { - var dataPointSlice pdata.NumberDataPointSlice +func getDataPointSlice(metric pmetric.Metric) pmetric.NumberDataPointSlice { + var dataPointSlice pmetric.NumberDataPointSlice switch metric.DataType() { - case pdata.MetricDataTypeGauge: + case pmetric.MetricDataTypeGauge: dataPointSlice = metric.Gauge().DataPoints() - case pdata.MetricDataTypeSum: + case pmetric.MetricDataTypeSum: dataPointSlice = metric.Sum().DataPoints() default: panic(fmt.Sprintf("data type not supported: %s", metric.DataType())) @@ -42,7 +42,7 @@ func getDataPointSlice(metric pdata.Metric) pdata.NumberDataPointSlice { return dataPointSlice } -func sortInstrumentationLibrary(a, b pdata.ScopeMetrics) bool { +func sortInstrumentationLibrary(a, b pmetric.ScopeMetrics) bool { if a.SchemaUrl() < b.SchemaUrl() { return true } diff --git a/internal/sharedcomponent/go.mod b/internal/sharedcomponent/go.mod index 335d55f6e23a..6d75ff08baae 100644 --- a/internal/sharedcomponent/go.mod +++ b/internal/sharedcomponent/go.mod @@ -4,14 +4,14 @@ go 1.17 require ( github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d ) require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/fsnotify/fsnotify v1.5.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/pretty v0.3.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect @@ -19,8 +19,7 @@ require ( github.com/pelletier/go-toml v1.9.4 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spf13/cast v1.4.1 // indirect - go.opentelemetry.io/collector/model v0.48.0 // indirect + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 // indirect go.opentelemetry.io/otel 
v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect @@ -30,3 +29,5 @@ require ( gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/internal/sharedcomponent/go.sum b/internal/sharedcomponent/go.sum index 00fe7d30d45b..a88fccf0659e 100644 --- a/internal/sharedcomponent/go.sum +++ b/internal/sharedcomponent/go.sum @@ -70,8 +70,8 @@ github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= @@ -115,8 +115,6 @@ github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBO github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -128,10 +126,10 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata 
v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= @@ -190,7 +188,7 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= diff --git a/internal/splunk/go.mod b/internal/splunk/go.mod index 7d0f37b32754..9f5a987ff0c3 100644 --- a/internal/splunk/go.mod +++ b/internal/splunk/go.mod @@ -4,18 +4,18 @@ go 1.17 require ( github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/multierr v1.8.0 ) require ( - github.com/cenkalti/backoff/v4 v4.1.2 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/fsnotify/fsnotify v1.5.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/protobuf v1.5.2 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/pretty v0.3.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect @@ -23,19 +23,14 @@ require ( github.com/pelletier/go-toml v1.9.4 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/zap v1.21.0 // indirect - golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect - golang.org/x/text v0.3.7 // indirect - google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect - google.golang.org/grpc v1.45.0 // indirect - google.golang.org/protobuf v1.28.0 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) 
+ +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/internal/splunk/go.sum b/internal/splunk/go.sum index 6df0e6e41f95..321027ecd9f8 100644 --- a/internal/splunk/go.sum +++ b/internal/splunk/go.sum @@ -1,8 +1,5 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= @@ -18,19 +15,11 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= -github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -38,16 +27,12 @@ github.com/davecgh/go-spew v1.1.1/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= @@ -64,18 +49,13 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= 
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -84,12 +64,10 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -118,8 +96,8 @@ github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= @@ -160,20 +138,15 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rhnvrm/simples3 v0.6.1/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod 
h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= @@ -183,18 +156,19 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod 
h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -218,20 +192,15 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -247,22 +216,17 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= 
-golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -283,22 +247,14 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -308,11 +264,7 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -320,8 +272,6 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/internal/splunk/hostid.go b/internal/splunk/hostid.go index 09f0d2746223..bc02c4c9fb16 100644 --- a/internal/splunk/hostid.go +++ b/internal/splunk/hostid.go @@ -18,8 +18,8 @@ import ( "fmt" "strings" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" ) // HostIDKey represents a host identifier. @@ -47,7 +47,7 @@ type HostID struct { // ResourceToHostID returns a boolean determining whether or not a HostID was able to be // computed or not. 
-func ResourceToHostID(res pdata.Resource) (HostID, bool) { +func ResourceToHostID(res pcommon.Resource) (HostID, bool) { var cloudAccount, hostID, provider string attrs := res.Attributes() @@ -113,7 +113,7 @@ func ResourceToHostID(res pdata.Resource) (HostID, bool) { return HostID{}, false } -func azureID(attrs pdata.Map, cloudAccount string) string { +func azureID(attrs pcommon.Map, cloudAccount string) string { var resourceGroupName string if attr, ok := attrs.Get("azure.resourcegroup.name"); ok { resourceGroupName = attr.StringVal() diff --git a/internal/splunk/hostid_test.go b/internal/splunk/hostid_test.go index fb55f2666733..3837a71e2793 100644 --- a/internal/splunk/hostid_test.go +++ b/internal/splunk/hostid_test.go @@ -18,13 +18,13 @@ import ( "testing" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" ) var ( - ec2Resource = func() pdata.Resource { - res := pdata.NewResource() + ec2Resource = func() pcommon.Resource { + res := pcommon.NewResource() attr := res.Attributes() attr.InsertString(conventions.AttributeCloudProvider, conventions.AttributeCloudProviderAWS) attr.InsertString(conventions.AttributeCloudAccountID, "1234") @@ -32,8 +32,8 @@ var ( attr.InsertString(conventions.AttributeHostID, "i-abcd") return res }() - ec2WithHost = func() pdata.Resource { - res := pdata.NewResource() + ec2WithHost = func() pcommon.Resource { + res := pcommon.NewResource() attr := res.Attributes() attr.InsertString(conventions.AttributeCloudProvider, conventions.AttributeCloudProviderAWS) attr.InsertString(conventions.AttributeCloudAccountID, "1234") @@ -42,30 +42,30 @@ var ( attr.InsertString(conventions.AttributeHostName, "localhost") return res }() - ec2PartialResource = func() pdata.Resource { - res := pdata.NewResource() + ec2PartialResource = func() pcommon.Resource { + res := pcommon.NewResource() attr := res.Attributes() attr.InsertString(conventions.AttributeCloudProvider, conventions.AttributeCloudProviderAWS) attr.InsertString(conventions.AttributeHostID, "i-abcd") return res }() - gcpResource = func() pdata.Resource { - res := pdata.NewResource() + gcpResource = func() pcommon.Resource { + res := pcommon.NewResource() attr := res.Attributes() attr.InsertString(conventions.AttributeCloudProvider, conventions.AttributeCloudProviderGCP) attr.InsertString(conventions.AttributeCloudAccountID, "1234") attr.InsertString(conventions.AttributeHostID, "i-abcd") return res }() - gcpPartialResource = func() pdata.Resource { - res := pdata.NewResource() + gcpPartialResource = func() pcommon.Resource { + res := pcommon.NewResource() attr := res.Attributes() attr.InsertString(conventions.AttributeCloudProvider, conventions.AttributeCloudProviderGCP) attr.InsertString(conventions.AttributeCloudAccountID, "1234") return res }() - azureResource = func() pdata.Resource { - res := pdata.NewResource() + azureResource = func() pcommon.Resource { + res := pcommon.NewResource() attrs := res.Attributes() attrs.InsertString(conventions.AttributeCloudProvider, conventions.AttributeCloudProviderAzure) attrs.InsertString(conventions.AttributeCloudPlatform, conventions.AttributeCloudPlatformAzureVM) @@ -77,8 +77,8 @@ var ( attrs.InsertString("azure.resourcegroup.name", "myResourcegroupName") return res }() - azureScalesetResource = func() pdata.Resource { - res := pdata.NewResource() + azureScalesetResource = func() pcommon.Resource { + res := 
pcommon.NewResource() attrs := res.Attributes() attrs.InsertString(conventions.AttributeCloudProvider, conventions.AttributeCloudProviderAzure) attrs.InsertString(conventions.AttributeCloudPlatform, conventions.AttributeCloudPlatformAzureVM) @@ -91,8 +91,8 @@ var ( attrs.InsertString("azure.resourcegroup.name", "myResourcegroupName") return res }() - azureMissingCloudAcct = func() pdata.Resource { - res := pdata.NewResource() + azureMissingCloudAcct = func() pcommon.Resource { + res := pcommon.NewResource() attrs := res.Attributes() attrs.InsertString(conventions.AttributeCloudProvider, conventions.AttributeCloudProviderAzure) attrs.InsertString(conventions.AttributeCloudPlatform, conventions.AttributeCloudPlatformAzureVM) @@ -102,8 +102,8 @@ var ( attrs.InsertString("azure.resourcegroup.name", "myResourcegroupName") return res }() - azureMissingResourceGroup = func() pdata.Resource { - res := pdata.NewResource() + azureMissingResourceGroup = func() pcommon.Resource { + res := pcommon.NewResource() attrs := res.Attributes() attrs.InsertString(conventions.AttributeCloudProvider, conventions.AttributeCloudProviderAzure) attrs.InsertString(conventions.AttributeCloudPlatform, conventions.AttributeCloudPlatformAzureVM) @@ -113,8 +113,8 @@ var ( attrs.InsertString("azure.vm.size", "42") return res }() - azureMissingHostName = func() pdata.Resource { - res := pdata.NewResource() + azureMissingHostName = func() pcommon.Resource { + res := pcommon.NewResource() attrs := res.Attributes() attrs.InsertString(conventions.AttributeCloudProvider, conventions.AttributeCloudProviderAzure) attrs.InsertString(conventions.AttributeCloudPlatform, conventions.AttributeCloudPlatformAzureVM) @@ -125,14 +125,14 @@ var ( attrs.InsertString("azure.vm.size", "42") return res }() - hostResource = func() pdata.Resource { - res := pdata.NewResource() + hostResource = func() pcommon.Resource { + res := pcommon.NewResource() attr := res.Attributes() attr.InsertString(conventions.AttributeHostName, "localhost") return res }() - unknownResource = func() pdata.Resource { - res := pdata.NewResource() + unknownResource = func() pcommon.Resource { + res := pcommon.NewResource() attr := res.Attributes() attr.InsertString(conventions.AttributeCloudProvider, "unknown") attr.InsertString(conventions.AttributeCloudAccountID, "1234") @@ -143,7 +143,7 @@ var ( func TestResourceToHostID(t *testing.T) { type args struct { - res pdata.Resource + res pcommon.Resource } tests := []struct { name string @@ -153,7 +153,7 @@ func TestResourceToHostID(t *testing.T) { }{ { name: "nil resource", - args: args{pdata.NewResource()}, + args: args{pcommon.NewResource()}, want: HostID{}, ok: false, }, @@ -258,10 +258,10 @@ func TestResourceToHostID(t *testing.T) { } func TestAzureID(t *testing.T) { - attrs := pdata.NewMap() - attrs.Insert("azure.resourcegroup.name", pdata.NewValueString("myResourceGroup")) - attrs.Insert("azure.vm.scaleset.name", pdata.NewValueString("myScalesetName")) - attrs.Insert(conventions.AttributeHostName, pdata.NewValueString("myScalesetName_1")) + attrs := pcommon.NewMap() + attrs.Insert("azure.resourcegroup.name", pcommon.NewValueString("myResourceGroup")) + attrs.Insert("azure.vm.scaleset.name", pcommon.NewValueString("myScalesetName")) + attrs.Insert(conventions.AttributeHostName, pcommon.NewValueString("myScalesetName_1")) id := azureID(attrs, "myCloudAccount") expected := "mycloudaccount/myresourcegroup/microsoft.compute/virtualmachinescalesets/myscalesetname/virtualmachines/1" assert.Equal(t, expected, id) diff --git 
a/internal/stanza/config.go b/internal/stanza/config.go index f4bdfeafee8c..7a62b0a6cadc 100644 --- a/internal/stanza/config.go +++ b/internal/stanza/config.go @@ -34,7 +34,7 @@ type BaseConfig struct { // but this allows a temporary solution type OperatorConfigs []map[string]interface{} -// ConverterConfig controls how the internal entry.Entry to pdata.Logs converter +// ConverterConfig controls how the internal entry.Entry to plog.Logs converter // works. type ConverterConfig struct { // MaxFlushCount defines the maximum number of entries that can be diff --git a/internal/stanza/converter.go b/internal/stanza/converter.go index 4091fa0209e2..521a4dffef98 100644 --- a/internal/stanza/converter.go +++ b/internal/stanza/converter.go @@ -27,11 +27,12 @@ import ( "sync" "github.com/open-telemetry/opentelemetry-log-collection/entry" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" "go.uber.org/zap" ) -// Converter converts a batch of entry.Entry into pdata.Logs aggregating translated +// Converter converts a batch of entry.Entry into plog.Logs aggregating translated // entries into logs coming from the same Resource. // // The diagram below illustrates the internal communication inside the Converter: @@ -49,7 +50,7 @@ import ( // │ │ │ ┌─────────────────────────────────────────────────┴─┐ // └─┼─┼─► workerLoop() │ // └─┤ │ consumes sent log entries from workerChan, │ -// │ │ translates received entries to pdata.LogRecords,│ +// │ │ translates received entries to plog.LogRecords,│ // └─┤ hashes them to generate an ID, and sends them │ // │ onto batchChan │ // └─────────────────────────┬─────────────────────────┘ @@ -72,7 +73,7 @@ import ( // type Converter struct { // pLogsChan is a channel on which aggregated logs will be sent to. - pLogsChan chan pdata.Logs + pLogsChan chan plog.Logs stopOnce sync.Once stopChan chan struct{} @@ -87,8 +88,8 @@ type Converter struct { // on flushChan. aggregationChan chan []workerItem - // flushChan is an internal channel used for transporting batched pdata.Logs. - flushChan chan pdata.Logs + // flushChan is an internal channel used for transporting batched plog.Logs. + flushChan chan plog.Logs // wg is a WaitGroup that makes sure that we wait for spun up goroutines exit // when Stop() is called. @@ -124,10 +125,10 @@ func NewConverter(opts ...ConverterOption) *Converter { workerChan: make(chan []*entry.Entry), workerCount: int(math.Max(1, float64(runtime.NumCPU()/4))), aggregationChan: make(chan []workerItem), - pLogsChan: make(chan pdata.Logs), + pLogsChan: make(chan plog.Logs), stopChan: make(chan struct{}), logger: zap.NewNop(), - flushChan: make(chan pdata.Logs), + flushChan: make(chan plog.Logs), } for _, opt := range opts { @@ -161,18 +162,18 @@ func (c *Converter) Stop() { } // OutChannel returns the channel on which converted entries will be sent to. -func (c *Converter) OutChannel() <-chan pdata.Logs { +func (c *Converter) OutChannel() <-chan plog.Logs { return c.pLogsChan } type workerItem struct { Resource map[string]interface{} - LogRecord pdata.LogRecord + LogRecord plog.LogRecord ResourceID uint64 } // workerLoop is responsible for obtaining log entries from Batch() calls, -// converting them to pdata.LogRecords and sending them together with the +// converting them to plog.LogRecords and sending them together with the // associated Resource through the aggregationChan for aggregation. 
func (c *Converter) workerLoop() { defer c.wg.Done() @@ -213,7 +214,7 @@ func (c *Converter) workerLoop() { func (c *Converter) aggregationLoop() { defer c.wg.Done() - resourceIDToLogs := make(map[uint64]pdata.Logs) + resourceIDToLogs := make(map[uint64]plog.Logs) for { select { @@ -232,7 +233,7 @@ func (c *Converter) aggregationLoop() { continue } - pLogs = pdata.NewLogs() + pLogs = plog.NewLogs() logs := pLogs.ResourceLogs() rls := logs.AppendEmpty() @@ -277,8 +278,8 @@ func (c *Converter) flushLoop() { } } -// flush flushes provided pdata.Logs entries onto a channel. -func (c *Converter) flush(ctx context.Context, pLogs pdata.Logs) error { +// flush flushes provided plog.Logs entries onto a channel. +func (c *Converter) flush(ctx context.Context, pLogs plog.Logs) error { doneChan := ctx.Done() select { @@ -305,18 +306,18 @@ func (c *Converter) Batch(e []*entry.Entry) error { } } -// convert converts one entry.Entry into pdata.LogRecord allocating it. -func convert(ent *entry.Entry) pdata.LogRecord { - dest := pdata.NewLogRecord() +// convert converts one entry.Entry into plog.LogRecord allocating it. +func convert(ent *entry.Entry) plog.LogRecord { + dest := plog.NewLogRecord() convertInto(ent, dest) return dest } -// Convert converts one entry.Entry into pdata.Logs. +// Convert converts one entry.Entry into plog.Logs. // To be used in a stateless setting like tests where ease of use is more // important than performance or throughput. -func Convert(ent *entry.Entry) pdata.Logs { - pLogs := pdata.NewLogs() +func Convert(ent *entry.Entry) plog.Logs { + pLogs := plog.NewLogs() logs := pLogs.ResourceLogs() rls := logs.AppendEmpty() @@ -330,13 +331,13 @@ func Convert(ent *entry.Entry) pdata.Logs { return pLogs } -// convertInto converts entry.Entry into provided pdata.LogRecord. -func convertInto(ent *entry.Entry, dest pdata.LogRecord) { +// convertInto converts entry.Entry into provided plog.LogRecord. 
+func convertInto(ent *entry.Entry, dest plog.LogRecord) { t := ent.ObservedTimestamp if !ent.Timestamp.IsZero() { t = ent.Timestamp } - dest.SetTimestamp(pdata.NewTimestampFromTime(t)) + dest.SetTimestamp(pcommon.NewTimestampFromTime(t)) dest.SetSeverityNumber(sevMap[ent.Severity]) dest.SetSeverityText(sevTextMap[ent.Severity]) @@ -347,12 +348,12 @@ func convertInto(ent *entry.Entry, dest pdata.LogRecord) { if ent.TraceId != nil { var buffer [16]byte copy(buffer[0:16], ent.TraceId) - dest.SetTraceID(pdata.NewTraceID(buffer)) + dest.SetTraceID(pcommon.NewTraceID(buffer)) } if ent.SpanId != nil { var buffer [8]byte copy(buffer[0:8], ent.SpanId) - dest.SetSpanID(pdata.NewSpanID(buffer)) + dest.SetSpanID(pcommon.NewSpanID(buffer)) } if ent.TraceFlags != nil { // The 8 least significant bits are the trace flags as defined in W3C Trace @@ -364,7 +365,7 @@ func convertInto(ent *entry.Entry, dest pdata.LogRecord) { } } -func insertToAttributeVal(value interface{}, dest pdata.Value) { +func insertToAttributeVal(value interface{}, dest pcommon.Value) { switch t := value.(type) { case bool: dest.SetBoolVal(t) @@ -405,14 +406,14 @@ func insertToAttributeVal(value interface{}, dest pdata.Value) { } } -func toAttributeMap(obsMap map[string]interface{}) pdata.Value { - attVal := pdata.NewValueMap() +func toAttributeMap(obsMap map[string]interface{}) pcommon.Value { + attVal := pcommon.NewValueMap() attMap := attVal.MapVal() insertToAttributeMap(obsMap, attMap) return attVal } -func insertToAttributeMap(obsMap map[string]interface{}, dest pdata.Map) { +func insertToAttributeMap(obsMap map[string]interface{}, dest pcommon.Map) { dest.EnsureCapacity(len(obsMap)) for k, v := range obsMap { switch t := v.(type) { @@ -458,8 +459,8 @@ func insertToAttributeMap(obsMap map[string]interface{}, dest pdata.Map) { } } -func toAttributeArray(obsArr []interface{}) pdata.Value { - arrVal := pdata.NewValueSlice() +func toAttributeArray(obsArr []interface{}) pcommon.Value { + arrVal := pcommon.NewValueSlice() arr := arrVal.SliceVal() arr.EnsureCapacity(len(obsArr)) for _, v := range obsArr { @@ -468,32 +469,32 @@ func toAttributeArray(obsArr []interface{}) pdata.Value { return arrVal } -var sevMap = map[entry.Severity]pdata.SeverityNumber{ - entry.Default: pdata.SeverityNumberUNDEFINED, - entry.Trace: pdata.SeverityNumberTRACE, - entry.Trace2: pdata.SeverityNumberTRACE2, - entry.Trace3: pdata.SeverityNumberTRACE3, - entry.Trace4: pdata.SeverityNumberTRACE4, - entry.Debug: pdata.SeverityNumberDEBUG, - entry.Debug2: pdata.SeverityNumberDEBUG2, - entry.Debug3: pdata.SeverityNumberDEBUG3, - entry.Debug4: pdata.SeverityNumberDEBUG4, - entry.Info: pdata.SeverityNumberINFO, - entry.Info2: pdata.SeverityNumberINFO2, - entry.Info3: pdata.SeverityNumberINFO3, - entry.Info4: pdata.SeverityNumberINFO4, - entry.Warn: pdata.SeverityNumberWARN, - entry.Warn2: pdata.SeverityNumberWARN2, - entry.Warn3: pdata.SeverityNumberWARN3, - entry.Warn4: pdata.SeverityNumberWARN4, - entry.Error: pdata.SeverityNumberERROR, - entry.Error2: pdata.SeverityNumberERROR2, - entry.Error3: pdata.SeverityNumberERROR3, - entry.Error4: pdata.SeverityNumberERROR4, - entry.Fatal: pdata.SeverityNumberFATAL, - entry.Fatal2: pdata.SeverityNumberFATAL2, - entry.Fatal3: pdata.SeverityNumberFATAL3, - entry.Fatal4: pdata.SeverityNumberFATAL4, +var sevMap = map[entry.Severity]plog.SeverityNumber{ + entry.Default: plog.SeverityNumberUNDEFINED, + entry.Trace: plog.SeverityNumberTRACE, + entry.Trace2: plog.SeverityNumberTRACE2, + entry.Trace3: plog.SeverityNumberTRACE3, + 
entry.Trace4: plog.SeverityNumberTRACE4, + entry.Debug: plog.SeverityNumberDEBUG, + entry.Debug2: plog.SeverityNumberDEBUG2, + entry.Debug3: plog.SeverityNumberDEBUG3, + entry.Debug4: plog.SeverityNumberDEBUG4, + entry.Info: plog.SeverityNumberINFO, + entry.Info2: plog.SeverityNumberINFO2, + entry.Info3: plog.SeverityNumberINFO3, + entry.Info4: plog.SeverityNumberINFO4, + entry.Warn: plog.SeverityNumberWARN, + entry.Warn2: plog.SeverityNumberWARN2, + entry.Warn3: plog.SeverityNumberWARN3, + entry.Warn4: plog.SeverityNumberWARN4, + entry.Error: plog.SeverityNumberERROR, + entry.Error2: plog.SeverityNumberERROR2, + entry.Error3: plog.SeverityNumberERROR3, + entry.Error4: plog.SeverityNumberERROR4, + entry.Fatal: plog.SeverityNumberFATAL, + entry.Fatal2: plog.SeverityNumberFATAL2, + entry.Fatal3: plog.SeverityNumberFATAL3, + entry.Fatal4: plog.SeverityNumberFATAL4, } var sevTextMap = map[entry.Severity]string{ diff --git a/internal/stanza/converter_test.go b/internal/stanza/converter_test.go index ee0ef0161ca8..52d513d33557 100644 --- a/internal/stanza/converter_test.go +++ b/internal/stanza/converter_test.go @@ -26,7 +26,8 @@ import ( "github.com/open-telemetry/opentelemetry-log-collection/entry" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" ) func BenchmarkConvertSimple(b *testing.B) { @@ -181,12 +182,12 @@ func TestConvert(t *testing.T) { rls := pLogs.ResourceLogs().At(0) if resAtts := rls.Resource().Attributes(); assert.Equal(t, 5, resAtts.Len()) { - m := pdata.NewMap() + m := pcommon.NewMap() m.InsertBool("bool", true) m.InsertInt("int", 123) m.InsertDouble("double", 12.34) m.InsertString("string", "hello") - m.Insert("object", pdata.NewValueMap()) + m.Insert("object", pcommon.NewValueMap()) assert.EqualValues(t, m.Sort(), resAtts.Sort()) } @@ -198,21 +199,21 @@ func TestConvert(t *testing.T) { lr := logs.At(0) - assert.Equal(t, pdata.SeverityNumberERROR, lr.SeverityNumber()) + assert.Equal(t, plog.SeverityNumberERROR, lr.SeverityNumber()) assert.Equal(t, "Error", lr.SeverityText()) if atts := lr.Attributes(); assert.Equal(t, 5, atts.Len()) { - m := pdata.NewMap() + m := pcommon.NewMap() m.InsertBool("bool", true) m.InsertInt("int", 123) m.InsertDouble("double", 12.34) m.InsertString("string", "hello") - m.Insert("object", pdata.NewValueMap()) + m.Insert("object", pcommon.NewValueMap()) assert.EqualValues(t, m.Sort(), atts.Sort()) } - if assert.Equal(t, pdata.ValueTypeMap, lr.Body().Type()) { - m := pdata.NewMap() + if assert.Equal(t, pcommon.ValueTypeMap, lr.Body().Type()) { + m := pcommon.NewMap() // Don't include a nested object because AttributeValueMap sorting // doesn't sort recursively. 
m.InsertBool("bool", true) @@ -506,7 +507,7 @@ func TestConverterCancelledContextCancellsTheFlush(t *testing.T) { go func() { defer wg.Done() - pLogs := pdata.NewLogs() + pLogs := plog.NewLogs() ills := pLogs.ResourceLogs().AppendEmpty().ScopeLogs().AppendEmpty() lr := convert(complexEntry()) @@ -582,7 +583,7 @@ func TestConvertMetadata(t *testing.T) { require.Equal(t, "hello", attVal.StringVal()) bod := result.Body() - require.Equal(t, pdata.ValueTypeBool, bod.Type()) + require.Equal(t, pcommon.ValueTypeBool, bod.Type()) require.True(t, bod.BoolVal()) } @@ -736,47 +737,47 @@ func TestConvertNestedMapBody(t *testing.T) { require.Equal(t, fmt.Sprintf("%v", unknownType), unknownAttVal.StringVal()) } -func anyToBody(body interface{}) pdata.Value { +func anyToBody(body interface{}) pcommon.Value { entry := entry.New() entry.Body = body return convertAndDrill(entry).Body() } -func convertAndDrill(entry *entry.Entry) pdata.LogRecord { +func convertAndDrill(entry *entry.Entry) plog.LogRecord { return convert(entry) } func TestConvertSeverity(t *testing.T) { cases := []struct { severity entry.Severity - expectedNumber pdata.SeverityNumber + expectedNumber plog.SeverityNumber expectedText string }{ - {entry.Default, pdata.SeverityNumberUNDEFINED, ""}, - {entry.Trace, pdata.SeverityNumberTRACE, "Trace"}, - {entry.Trace2, pdata.SeverityNumberTRACE2, "Trace2"}, - {entry.Trace3, pdata.SeverityNumberTRACE3, "Trace3"}, - {entry.Trace4, pdata.SeverityNumberTRACE4, "Trace4"}, - {entry.Debug, pdata.SeverityNumberDEBUG, "Debug"}, - {entry.Debug2, pdata.SeverityNumberDEBUG2, "Debug2"}, - {entry.Debug3, pdata.SeverityNumberDEBUG3, "Debug3"}, - {entry.Debug4, pdata.SeverityNumberDEBUG4, "Debug4"}, - {entry.Info, pdata.SeverityNumberINFO, "Info"}, - {entry.Info2, pdata.SeverityNumberINFO2, "Info2"}, - {entry.Info3, pdata.SeverityNumberINFO3, "Info3"}, - {entry.Info4, pdata.SeverityNumberINFO4, "Info4"}, - {entry.Warn, pdata.SeverityNumberWARN, "Warn"}, - {entry.Warn2, pdata.SeverityNumberWARN2, "Warn2"}, - {entry.Warn3, pdata.SeverityNumberWARN3, "Warn3"}, - {entry.Warn4, pdata.SeverityNumberWARN4, "Warn4"}, - {entry.Error, pdata.SeverityNumberERROR, "Error"}, - {entry.Error2, pdata.SeverityNumberERROR2, "Error2"}, - {entry.Error3, pdata.SeverityNumberERROR3, "Error3"}, - {entry.Error4, pdata.SeverityNumberERROR4, "Error4"}, - {entry.Fatal, pdata.SeverityNumberFATAL, "Fatal"}, - {entry.Fatal2, pdata.SeverityNumberFATAL2, "Fatal2"}, - {entry.Fatal3, pdata.SeverityNumberFATAL3, "Fatal3"}, - {entry.Fatal4, pdata.SeverityNumberFATAL4, "Fatal4"}, + {entry.Default, plog.SeverityNumberUNDEFINED, ""}, + {entry.Trace, plog.SeverityNumberTRACE, "Trace"}, + {entry.Trace2, plog.SeverityNumberTRACE2, "Trace2"}, + {entry.Trace3, plog.SeverityNumberTRACE3, "Trace3"}, + {entry.Trace4, plog.SeverityNumberTRACE4, "Trace4"}, + {entry.Debug, plog.SeverityNumberDEBUG, "Debug"}, + {entry.Debug2, plog.SeverityNumberDEBUG2, "Debug2"}, + {entry.Debug3, plog.SeverityNumberDEBUG3, "Debug3"}, + {entry.Debug4, plog.SeverityNumberDEBUG4, "Debug4"}, + {entry.Info, plog.SeverityNumberINFO, "Info"}, + {entry.Info2, plog.SeverityNumberINFO2, "Info2"}, + {entry.Info3, plog.SeverityNumberINFO3, "Info3"}, + {entry.Info4, plog.SeverityNumberINFO4, "Info4"}, + {entry.Warn, plog.SeverityNumberWARN, "Warn"}, + {entry.Warn2, plog.SeverityNumberWARN2, "Warn2"}, + {entry.Warn3, plog.SeverityNumberWARN3, "Warn3"}, + {entry.Warn4, plog.SeverityNumberWARN4, "Warn4"}, + {entry.Error, plog.SeverityNumberERROR, "Error"}, + {entry.Error2, 
plog.SeverityNumberERROR2, "Error2"}, + {entry.Error3, plog.SeverityNumberERROR3, "Error3"}, + {entry.Error4, plog.SeverityNumberERROR4, "Error4"}, + {entry.Fatal, plog.SeverityNumberFATAL, "Fatal"}, + {entry.Fatal2, plog.SeverityNumberFATAL2, "Fatal2"}, + {entry.Fatal3, plog.SeverityNumberFATAL3, "Fatal3"}, + {entry.Fatal4, plog.SeverityNumberFATAL4, "Fatal4"}, } for _, tc := range cases { @@ -802,11 +803,11 @@ func TestConvertTrace(t *testing.T) { 0x01, }}) - require.Equal(t, pdata.NewTraceID( + require.Equal(t, pcommon.NewTraceID( [16]byte{ 0x48, 0x01, 0x40, 0xf3, 0xd7, 0x70, 0xa5, 0xae, 0x32, 0xf0, 0xa2, 0x2b, 0x6a, 0x81, 0x2c, 0xff, }), record.TraceID()) - require.Equal(t, pdata.NewSpanID( + require.Equal(t, pcommon.NewSpanID( [8]byte{ 0x32, 0xf0, 0xa2, 0x2b, 0x6a, 0x81, 0x2c, 0xff, }), record.SpanID()) diff --git a/internal/stanza/go.mod b/internal/stanza/go.mod index 73abd02b6f0b..7c16d634b632 100644 --- a/internal/stanza/go.mod +++ b/internal/stanza/go.mod @@ -6,8 +6,8 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage v0.48.0 github.com/open-telemetry/opentelemetry-log-collection v0.29.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.opentelemetry.io/otel/metric v0.29.0 go.opentelemetry.io/otel/trace v1.6.3 go.uber.org/multierr v1.8.0 @@ -21,7 +21,7 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/text v0.2.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect @@ -31,7 +31,6 @@ require ( github.com/observiq/ctimefmt v1.0.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rogpeppe/go-internal v1.8.0 // indirect - github.com/spf13/cast v1.4.1 // indirect go.etcd.io/bbolt v1.3.6 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect @@ -43,3 +42,5 @@ require ( ) replace github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage => ../../extension/storage + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/internal/stanza/go.sum b/internal/stanza/go.sum index 01760a98ecf3..47553be177d1 100644 --- a/internal/stanza/go.sum +++ b/internal/stanza/go.sum @@ -104,8 +104,8 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod 
h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -164,8 +164,6 @@ github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6po github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/sanity-io/litter v1.2.0/go.mod h1:JF6pZUFgu2Q0sBZ+HSV35P8TVPI1TTzEwyu9FXAw2W4= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -185,15 +183,15 @@ go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= diff --git a/internal/stanza/mocks_test.go b/internal/stanza/mocks_test.go index 61887b30d9cb..12f905d9865c 100644 --- a/internal/stanza/mocks_test.go +++ b/internal/stanza/mocks_test.go @@ -29,7 +29,7 @@ import ( "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/extension/experimental/storage" - "go.opentelemetry.io/collector/model/pdata" + 
"go.opentelemetry.io/collector/pdata/plog" "go.uber.org/zap" ) @@ -85,7 +85,7 @@ func (m *mockLogsConsumer) Capabilities() consumer.Capabilities { return consumer.Capabilities{MutatesData: false} } -func (m *mockLogsConsumer) ConsumeLogs(ctx context.Context, ld pdata.Logs) error { +func (m *mockLogsConsumer) ConsumeLogs(ctx context.Context, ld plog.Logs) error { atomic.AddInt32(&m.received, int32(ld.LogRecordCount())) return nil } @@ -107,7 +107,7 @@ func (m *mockLogsRejecter) Capabilities() consumer.Capabilities { return consumer.Capabilities{MutatesData: false} } -func (m *mockLogsRejecter) ConsumeLogs(ctx context.Context, ld pdata.Logs) error { +func (m *mockLogsRejecter) ConsumeLogs(ctx context.Context, ld plog.Logs) error { atomic.AddInt32(&m.rejected, 1) return fmt.Errorf("no") } diff --git a/internal/tools/go.mod b/internal/tools/go.mod index 623aeac4044e..9a175b9b6f7e 100644 --- a/internal/tools/go.mod +++ b/internal/tools/go.mod @@ -198,3 +198,5 @@ require ( mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b // indirect mvdan.cc/unparam v0.0.0-20211214103731-d0ef000c54e5 // indirect ) + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/pkg/batchperresourceattr/batchperresourceattr.go b/pkg/batchperresourceattr/batchperresourceattr.go index d620884b3108..a3fdae018bec 100644 --- a/pkg/batchperresourceattr/batchperresourceattr.go +++ b/pkg/batchperresourceattr/batchperresourceattr.go @@ -18,7 +18,9 @@ import ( "context" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/multierr" ) @@ -39,7 +41,7 @@ func (bt batchTraces) Capabilities() consumer.Capabilities { return consumer.Capabilities{MutatesData: true} } -func (bt *batchTraces) ConsumeTraces(ctx context.Context, td pdata.Traces) error { +func (bt *batchTraces) ConsumeTraces(ctx context.Context, td ptrace.Traces) error { rss := td.ResourceSpans() lenRss := rss.Len() // If zero or one resource spans just call next. @@ -47,7 +49,7 @@ func (bt *batchTraces) ConsumeTraces(ctx context.Context, td pdata.Traces) error return bt.next.ConsumeTraces(ctx, td) } - tracesByAttr := make(map[string]pdata.Traces) + tracesByAttr := make(map[string]ptrace.Traces) for i := 0; i < lenRss; i++ { rs := rss.At(i) var attrVal string @@ -57,11 +59,11 @@ func (bt *batchTraces) ConsumeTraces(ctx context.Context, td pdata.Traces) error tracesForAttr, ok := tracesByAttr[attrVal] if !ok { - tracesForAttr = pdata.NewTraces() + tracesForAttr = ptrace.NewTraces() tracesByAttr[attrVal] = tracesForAttr } - // Append ResourceSpan to pdata.Traces for this attribute value. + // Append ResourceSpan to ptrace.Traces for this attribute value. tgt := tracesForAttr.ResourceSpans().AppendEmpty() rs.CopyTo(tgt) } @@ -90,7 +92,7 @@ func (bt batchMetrics) Capabilities() consumer.Capabilities { return consumer.Capabilities{MutatesData: true} } -func (bt *batchMetrics) ConsumeMetrics(ctx context.Context, td pdata.Metrics) error { +func (bt *batchMetrics) ConsumeMetrics(ctx context.Context, td pmetric.Metrics) error { rms := td.ResourceMetrics() lenRms := rms.Len() // If zero or one resource spans just call next. 
@@ -98,7 +100,7 @@ func (bt *batchMetrics) ConsumeMetrics(ctx context.Context, td pdata.Metrics) er return bt.next.ConsumeMetrics(ctx, td) } - metricsByAttr := make(map[string]pdata.Metrics) + metricsByAttr := make(map[string]pmetric.Metrics) for i := 0; i < lenRms; i++ { rm := rms.At(i) var attrVal string @@ -108,11 +110,11 @@ func (bt *batchMetrics) ConsumeMetrics(ctx context.Context, td pdata.Metrics) er metricsForAttr, ok := metricsByAttr[attrVal] if !ok { - metricsForAttr = pdata.NewMetrics() + metricsForAttr = pmetric.NewMetrics() metricsByAttr[attrVal] = metricsForAttr } - // Append ResourceSpan to pdata.Metrics for this attribute value. + // Append ResourceSpan to pmetric.Metrics for this attribute value. tgt := metricsForAttr.ResourceMetrics().AppendEmpty() rm.CopyTo(tgt) } @@ -141,7 +143,7 @@ func (bt batchLogs) Capabilities() consumer.Capabilities { return consumer.Capabilities{MutatesData: true} } -func (bt *batchLogs) ConsumeLogs(ctx context.Context, td pdata.Logs) error { +func (bt *batchLogs) ConsumeLogs(ctx context.Context, td plog.Logs) error { rls := td.ResourceLogs() lenRls := rls.Len() // If zero or one resource spans just call next. @@ -149,7 +151,7 @@ func (bt *batchLogs) ConsumeLogs(ctx context.Context, td pdata.Logs) error { return bt.next.ConsumeLogs(ctx, td) } - logsByAttr := make(map[string]pdata.Logs) + logsByAttr := make(map[string]plog.Logs) for i := 0; i < lenRls; i++ { rl := rls.At(i) var attrVal string @@ -159,11 +161,11 @@ func (bt *batchLogs) ConsumeLogs(ctx context.Context, td pdata.Logs) error { logsForAttr, ok := logsByAttr[attrVal] if !ok { - logsForAttr = pdata.NewLogs() + logsForAttr = plog.NewLogs() logsByAttr[attrVal] = logsForAttr } - // Append ResourceSpan to pdata.Logs for this attribute value. + // Append ResourceSpan to plog.Logs for this attribute value. 
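// Illustrative aside (not part of the patch): the batchperresourceattr consumer in
// the surrounding hunks now groups plog/pmetric/ptrace batches by a resource
// attribute. A minimal sketch of wiring it up for logs; the contrib import path and
// the AllLogs accessor on consumertest.LogsSink are assumptions, while the pdata
// calls are the ones shown in these hunks:
package main

import (
	"context"
	"fmt"

	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperresourceattr"
	"go.opentelemetry.io/collector/consumer/consumertest"
	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/plog"
)

func main() {
	ld := plog.NewLogs()
	for _, tenant := range []string{"a", "b", "a"} {
		rl := ld.ResourceLogs().AppendEmpty()
		rl.Resource().Attributes().Upsert("tenant", pcommon.NewValueString(tenant))
		rl.ScopeLogs().AppendEmpty().LogRecords().AppendEmpty()
	}

	sink := new(consumertest.LogsSink)
	bpr := batchperresourceattr.NewBatchPerResourceLogs("tenant", sink)
	if err := bpr.ConsumeLogs(context.Background(), ld); err != nil {
		panic(err)
	}
	fmt.Println(len(sink.AllLogs())) // expected 2: one plog.Logs per distinct "tenant" value
}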
tgt := logsForAttr.ResourceLogs().AppendEmpty() rl.CopyTo(tgt) } diff --git a/pkg/batchperresourceattr/batchperresourceattr_test.go b/pkg/batchperresourceattr/batchperresourceattr_test.go index ab7c8766f853..9b580b543dc0 100644 --- a/pkg/batchperresourceattr/batchperresourceattr_test.go +++ b/pkg/batchperresourceattr/batchperresourceattr_test.go @@ -25,12 +25,15 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/consumer/consumertest" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" ) func TestSplitTracesOneResourceSpans(t *testing.T) { - inBatch := pdata.NewTraces() - fillResourceSpans(inBatch.ResourceSpans().AppendEmpty(), "attr_key", pdata.NewValueString("1")) + inBatch := ptrace.NewTraces() + fillResourceSpans(inBatch.ResourceSpans().AppendEmpty(), "attr_key", pcommon.NewValueString("1")) sink := new(consumertest.TracesSink) bpr := NewBatchPerResourceTraces("attr_key", sink) @@ -41,9 +44,9 @@ func TestSplitTracesOneResourceSpans(t *testing.T) { } func TestSplitTracesReturnError(t *testing.T) { - inBatch := pdata.NewTraces() - fillResourceSpans(inBatch.ResourceSpans().AppendEmpty(), "attr_key", pdata.NewValueString("1")) - fillResourceSpans(inBatch.ResourceSpans().AppendEmpty(), "attr_key", pdata.NewValueString("1")) + inBatch := ptrace.NewTraces() + fillResourceSpans(inBatch.ResourceSpans().AppendEmpty(), "attr_key", pcommon.NewValueString("1")) + fillResourceSpans(inBatch.ResourceSpans().AppendEmpty(), "attr_key", pcommon.NewValueString("1")) err := errors.New("test_error") bpr := NewBatchPerResourceTraces("attr_key", consumertest.NewErr(err)) @@ -51,11 +54,11 @@ func TestSplitTracesReturnError(t *testing.T) { } func TestSplitTracesSameResource(t *testing.T) { - inBatch := pdata.NewTraces() - fillResourceSpans(inBatch.ResourceSpans().AppendEmpty(), "same_attr_val", pdata.NewValueString("1")) - fillResourceSpans(inBatch.ResourceSpans().AppendEmpty(), "same_attr_val", pdata.NewValueString("1")) - fillResourceSpans(inBatch.ResourceSpans().AppendEmpty(), "same_attr_val", pdata.NewValueString("1")) - fillResourceSpans(inBatch.ResourceSpans().AppendEmpty(), "same_attr_val", pdata.NewValueString("1")) + inBatch := ptrace.NewTraces() + fillResourceSpans(inBatch.ResourceSpans().AppendEmpty(), "same_attr_val", pcommon.NewValueString("1")) + fillResourceSpans(inBatch.ResourceSpans().AppendEmpty(), "same_attr_val", pcommon.NewValueString("1")) + fillResourceSpans(inBatch.ResourceSpans().AppendEmpty(), "same_attr_val", pcommon.NewValueString("1")) + fillResourceSpans(inBatch.ResourceSpans().AppendEmpty(), "same_attr_val", pcommon.NewValueString("1")) sink := new(consumertest.TracesSink) bpr := NewBatchPerResourceTraces("same_attr_val", sink) @@ -66,16 +69,16 @@ func TestSplitTracesSameResource(t *testing.T) { } func TestSplitTracesIntoDifferentBatches(t *testing.T) { - inBatch := pdata.NewTraces() - fillResourceSpans(inBatch.ResourceSpans().AppendEmpty(), "attr_key", pdata.NewValueString("1")) - fillResourceSpans(inBatch.ResourceSpans().AppendEmpty(), "attr_key", pdata.NewValueString("2")) - fillResourceSpans(inBatch.ResourceSpans().AppendEmpty(), "attr_key", pdata.NewValueString("3")) - fillResourceSpans(inBatch.ResourceSpans().AppendEmpty(), "attr_key", pdata.NewValueString("4")) - 
fillResourceSpans(inBatch.ResourceSpans().AppendEmpty(), "attr_key", pdata.NewValueString("1")) - fillResourceSpans(inBatch.ResourceSpans().AppendEmpty(), "attr_key", pdata.NewValueString("2")) - fillResourceSpans(inBatch.ResourceSpans().AppendEmpty(), "attr_key", pdata.NewValueString("3")) - fillResourceSpans(inBatch.ResourceSpans().AppendEmpty(), "attr_key", pdata.NewValueString("4")) - fillResourceSpans(inBatch.ResourceSpans().AppendEmpty(), "diff_attr_key", pdata.NewValueString("1")) + inBatch := ptrace.NewTraces() + fillResourceSpans(inBatch.ResourceSpans().AppendEmpty(), "attr_key", pcommon.NewValueString("1")) + fillResourceSpans(inBatch.ResourceSpans().AppendEmpty(), "attr_key", pcommon.NewValueString("2")) + fillResourceSpans(inBatch.ResourceSpans().AppendEmpty(), "attr_key", pcommon.NewValueString("3")) + fillResourceSpans(inBatch.ResourceSpans().AppendEmpty(), "attr_key", pcommon.NewValueString("4")) + fillResourceSpans(inBatch.ResourceSpans().AppendEmpty(), "attr_key", pcommon.NewValueString("1")) + fillResourceSpans(inBatch.ResourceSpans().AppendEmpty(), "attr_key", pcommon.NewValueString("2")) + fillResourceSpans(inBatch.ResourceSpans().AppendEmpty(), "attr_key", pcommon.NewValueString("3")) + fillResourceSpans(inBatch.ResourceSpans().AppendEmpty(), "attr_key", pcommon.NewValueString("4")) + fillResourceSpans(inBatch.ResourceSpans().AppendEmpty(), "diff_attr_key", pcommon.NewValueString("1")) sink := new(consumertest.TracesSink) bpr := NewBatchPerResourceTraces("attr_key", sink) @@ -91,8 +94,8 @@ func TestSplitTracesIntoDifferentBatches(t *testing.T) { } func TestSplitMetricsOneResourceMetrics(t *testing.T) { - inBatch := pdata.NewMetrics() - fillResourceMetrics(inBatch.ResourceMetrics().AppendEmpty(), "attr_key", pdata.NewValueString("1")) + inBatch := pmetric.NewMetrics() + fillResourceMetrics(inBatch.ResourceMetrics().AppendEmpty(), "attr_key", pcommon.NewValueString("1")) sink := new(consumertest.MetricsSink) bpr := NewBatchPerResourceMetrics("attr_key", sink) @@ -103,9 +106,9 @@ func TestSplitMetricsOneResourceMetrics(t *testing.T) { } func TestSplitMetricsReturnError(t *testing.T) { - inBatch := pdata.NewMetrics() - fillResourceMetrics(inBatch.ResourceMetrics().AppendEmpty(), "attr_key", pdata.NewValueString("1")) - fillResourceMetrics(inBatch.ResourceMetrics().AppendEmpty(), "attr_key", pdata.NewValueString("1")) + inBatch := pmetric.NewMetrics() + fillResourceMetrics(inBatch.ResourceMetrics().AppendEmpty(), "attr_key", pcommon.NewValueString("1")) + fillResourceMetrics(inBatch.ResourceMetrics().AppendEmpty(), "attr_key", pcommon.NewValueString("1")) err := errors.New("test_error") bpr := NewBatchPerResourceMetrics("attr_key", consumertest.NewErr(err)) @@ -113,11 +116,11 @@ func TestSplitMetricsReturnError(t *testing.T) { } func TestSplitMetricsSameResource(t *testing.T) { - inBatch := pdata.NewMetrics() - fillResourceMetrics(inBatch.ResourceMetrics().AppendEmpty(), "same_attr_val", pdata.NewValueString("1")) - fillResourceMetrics(inBatch.ResourceMetrics().AppendEmpty(), "same_attr_val", pdata.NewValueString("1")) - fillResourceMetrics(inBatch.ResourceMetrics().AppendEmpty(), "same_attr_val", pdata.NewValueString("1")) - fillResourceMetrics(inBatch.ResourceMetrics().AppendEmpty(), "same_attr_val", pdata.NewValueString("1")) + inBatch := pmetric.NewMetrics() + fillResourceMetrics(inBatch.ResourceMetrics().AppendEmpty(), "same_attr_val", pcommon.NewValueString("1")) + fillResourceMetrics(inBatch.ResourceMetrics().AppendEmpty(), "same_attr_val", 
pcommon.NewValueString("1")) + fillResourceMetrics(inBatch.ResourceMetrics().AppendEmpty(), "same_attr_val", pcommon.NewValueString("1")) + fillResourceMetrics(inBatch.ResourceMetrics().AppendEmpty(), "same_attr_val", pcommon.NewValueString("1")) sink := new(consumertest.MetricsSink) bpr := NewBatchPerResourceMetrics("same_attr_val", sink) @@ -128,16 +131,16 @@ func TestSplitMetricsSameResource(t *testing.T) { } func TestSplitMetricsIntoDifferentBatches(t *testing.T) { - inBatch := pdata.NewMetrics() - fillResourceMetrics(inBatch.ResourceMetrics().AppendEmpty(), "attr_key", pdata.NewValueString("1")) - fillResourceMetrics(inBatch.ResourceMetrics().AppendEmpty(), "attr_key", pdata.NewValueString("2")) - fillResourceMetrics(inBatch.ResourceMetrics().AppendEmpty(), "attr_key", pdata.NewValueString("3")) - fillResourceMetrics(inBatch.ResourceMetrics().AppendEmpty(), "attr_key", pdata.NewValueString("4")) - fillResourceMetrics(inBatch.ResourceMetrics().AppendEmpty(), "attr_key", pdata.NewValueString("1")) - fillResourceMetrics(inBatch.ResourceMetrics().AppendEmpty(), "attr_key", pdata.NewValueString("2")) - fillResourceMetrics(inBatch.ResourceMetrics().AppendEmpty(), "attr_key", pdata.NewValueString("3")) - fillResourceMetrics(inBatch.ResourceMetrics().AppendEmpty(), "attr_key", pdata.NewValueString("4")) - fillResourceMetrics(inBatch.ResourceMetrics().AppendEmpty(), "diff_attr_key", pdata.NewValueString("1")) + inBatch := pmetric.NewMetrics() + fillResourceMetrics(inBatch.ResourceMetrics().AppendEmpty(), "attr_key", pcommon.NewValueString("1")) + fillResourceMetrics(inBatch.ResourceMetrics().AppendEmpty(), "attr_key", pcommon.NewValueString("2")) + fillResourceMetrics(inBatch.ResourceMetrics().AppendEmpty(), "attr_key", pcommon.NewValueString("3")) + fillResourceMetrics(inBatch.ResourceMetrics().AppendEmpty(), "attr_key", pcommon.NewValueString("4")) + fillResourceMetrics(inBatch.ResourceMetrics().AppendEmpty(), "attr_key", pcommon.NewValueString("1")) + fillResourceMetrics(inBatch.ResourceMetrics().AppendEmpty(), "attr_key", pcommon.NewValueString("2")) + fillResourceMetrics(inBatch.ResourceMetrics().AppendEmpty(), "attr_key", pcommon.NewValueString("3")) + fillResourceMetrics(inBatch.ResourceMetrics().AppendEmpty(), "attr_key", pcommon.NewValueString("4")) + fillResourceMetrics(inBatch.ResourceMetrics().AppendEmpty(), "diff_attr_key", pcommon.NewValueString("1")) sink := new(consumertest.MetricsSink) bpr := NewBatchPerResourceMetrics("attr_key", sink) @@ -153,8 +156,8 @@ func TestSplitMetricsIntoDifferentBatches(t *testing.T) { } func TestSplitLogsOneResourceLogs(t *testing.T) { - inBatch := pdata.NewLogs() - fillResourceLogs(inBatch.ResourceLogs().AppendEmpty(), "attr_key", pdata.NewValueString("1")) + inBatch := plog.NewLogs() + fillResourceLogs(inBatch.ResourceLogs().AppendEmpty(), "attr_key", pcommon.NewValueString("1")) sink := new(consumertest.LogsSink) bpr := NewBatchPerResourceLogs("attr_key", sink) @@ -165,9 +168,9 @@ func TestSplitLogsOneResourceLogs(t *testing.T) { } func TestSplitLogsReturnError(t *testing.T) { - inBatch := pdata.NewLogs() - fillResourceLogs(inBatch.ResourceLogs().AppendEmpty(), "attr_key", pdata.NewValueString("1")) - fillResourceLogs(inBatch.ResourceLogs().AppendEmpty(), "attr_key", pdata.NewValueString("1")) + inBatch := plog.NewLogs() + fillResourceLogs(inBatch.ResourceLogs().AppendEmpty(), "attr_key", pcommon.NewValueString("1")) + fillResourceLogs(inBatch.ResourceLogs().AppendEmpty(), "attr_key", pcommon.NewValueString("1")) err := errors.New("test_error") 
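// Illustrative aside (not part of the patch): the sortLogs/fillResourceLogs
// helpers further below read resource attributes back through pcommon.Map.
// A minimal sketch of the Upsert/Get round trip they rely on, using only calls
// that appear in these hunks:
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/plog"
)

func main() {
	rl := plog.NewLogs().ResourceLogs().AppendEmpty()
	rl.Resource().Attributes().Upsert("attr_key", pcommon.NewValueString("1"))

	if av, ok := rl.Resource().Attributes().Get("attr_key"); ok {
		fmt.Println(av.StringVal()) // "1"
	}
}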
bpr := NewBatchPerResourceLogs("attr_key", consumertest.NewErr(err)) @@ -175,11 +178,11 @@ func TestSplitLogsReturnError(t *testing.T) { } func TestSplitLogsSameResource(t *testing.T) { - inBatch := pdata.NewLogs() - fillResourceLogs(inBatch.ResourceLogs().AppendEmpty(), "same_attr_val", pdata.NewValueString("1")) - fillResourceLogs(inBatch.ResourceLogs().AppendEmpty(), "same_attr_val", pdata.NewValueString("1")) - fillResourceLogs(inBatch.ResourceLogs().AppendEmpty(), "same_attr_val", pdata.NewValueString("1")) - fillResourceLogs(inBatch.ResourceLogs().AppendEmpty(), "same_attr_val", pdata.NewValueString("1")) + inBatch := plog.NewLogs() + fillResourceLogs(inBatch.ResourceLogs().AppendEmpty(), "same_attr_val", pcommon.NewValueString("1")) + fillResourceLogs(inBatch.ResourceLogs().AppendEmpty(), "same_attr_val", pcommon.NewValueString("1")) + fillResourceLogs(inBatch.ResourceLogs().AppendEmpty(), "same_attr_val", pcommon.NewValueString("1")) + fillResourceLogs(inBatch.ResourceLogs().AppendEmpty(), "same_attr_val", pcommon.NewValueString("1")) sink := new(consumertest.LogsSink) bpr := NewBatchPerResourceLogs("same_attr_val", sink) @@ -190,16 +193,16 @@ func TestSplitLogsSameResource(t *testing.T) { } func TestSplitLogsIntoDifferentBatches(t *testing.T) { - inBatch := pdata.NewLogs() - fillResourceLogs(inBatch.ResourceLogs().AppendEmpty(), "attr_key", pdata.NewValueString("1")) - fillResourceLogs(inBatch.ResourceLogs().AppendEmpty(), "attr_key", pdata.NewValueString("2")) - fillResourceLogs(inBatch.ResourceLogs().AppendEmpty(), "attr_key", pdata.NewValueString("3")) - fillResourceLogs(inBatch.ResourceLogs().AppendEmpty(), "attr_key", pdata.NewValueString("4")) - fillResourceLogs(inBatch.ResourceLogs().AppendEmpty(), "attr_key", pdata.NewValueString("1")) - fillResourceLogs(inBatch.ResourceLogs().AppendEmpty(), "attr_key", pdata.NewValueString("2")) - fillResourceLogs(inBatch.ResourceLogs().AppendEmpty(), "attr_key", pdata.NewValueString("3")) - fillResourceLogs(inBatch.ResourceLogs().AppendEmpty(), "attr_key", pdata.NewValueString("4")) - fillResourceLogs(inBatch.ResourceLogs().AppendEmpty(), "diff_attr_key", pdata.NewValueString("1")) + inBatch := plog.NewLogs() + fillResourceLogs(inBatch.ResourceLogs().AppendEmpty(), "attr_key", pcommon.NewValueString("1")) + fillResourceLogs(inBatch.ResourceLogs().AppendEmpty(), "attr_key", pcommon.NewValueString("2")) + fillResourceLogs(inBatch.ResourceLogs().AppendEmpty(), "attr_key", pcommon.NewValueString("3")) + fillResourceLogs(inBatch.ResourceLogs().AppendEmpty(), "attr_key", pcommon.NewValueString("4")) + fillResourceLogs(inBatch.ResourceLogs().AppendEmpty(), "attr_key", pcommon.NewValueString("1")) + fillResourceLogs(inBatch.ResourceLogs().AppendEmpty(), "attr_key", pcommon.NewValueString("2")) + fillResourceLogs(inBatch.ResourceLogs().AppendEmpty(), "attr_key", pcommon.NewValueString("3")) + fillResourceLogs(inBatch.ResourceLogs().AppendEmpty(), "attr_key", pcommon.NewValueString("4")) + fillResourceLogs(inBatch.ResourceLogs().AppendEmpty(), "diff_attr_key", pcommon.NewValueString("1")) sink := new(consumertest.LogsSink) bpr := NewBatchPerResourceLogs("attr_key", sink) @@ -214,15 +217,15 @@ func TestSplitLogsIntoDifferentBatches(t *testing.T) { assert.Equal(t, newLogs(inBatch.ResourceLogs().At(3), inBatch.ResourceLogs().At(7)), outBatches[4]) } -func newTraces(rss ...pdata.ResourceSpans) pdata.Traces { - td := pdata.NewTraces() +func newTraces(rss ...ptrace.ResourceSpans) ptrace.Traces { + td := ptrace.NewTraces() for _, rs := range rss { 
rs.CopyTo(td.ResourceSpans().AppendEmpty()) } return td } -func sortTraces(tds []pdata.Traces, attrKey string) { +func sortTraces(tds []ptrace.Traces, attrKey string) { sort.Slice(tds, func(i, j int) bool { valI := "" if av, ok := tds[i].ResourceSpans().At(0).Resource().Attributes().Get(attrKey); ok { @@ -236,27 +239,27 @@ func sortTraces(tds []pdata.Traces, attrKey string) { }) } -func fillResourceSpans(rs pdata.ResourceSpans, key string, val pdata.Value) { +func fillResourceSpans(rs ptrace.ResourceSpans, key string, val pcommon.Value) { rs.Resource().Attributes().Upsert(key, val) - rs.Resource().Attributes().Upsert("__other_key__", pdata.NewValueInt(123)) + rs.Resource().Attributes().Upsert("__other_key__", pcommon.NewValueInt(123)) ils := rs.ScopeSpans().AppendEmpty() firstSpan := ils.Spans().AppendEmpty() firstSpan.SetName("first-span") - firstSpan.SetTraceID(pdata.NewTraceID([16]byte{byte(rand.Int())})) + firstSpan.SetTraceID(pcommon.NewTraceID([16]byte{byte(rand.Int())})) secondSpan := ils.Spans().AppendEmpty() secondSpan.SetName("second-span") - secondSpan.SetTraceID(pdata.NewTraceID([16]byte{byte(rand.Int())})) + secondSpan.SetTraceID(pcommon.NewTraceID([16]byte{byte(rand.Int())})) } -func newMetrics(rms ...pdata.ResourceMetrics) pdata.Metrics { - md := pdata.NewMetrics() +func newMetrics(rms ...pmetric.ResourceMetrics) pmetric.Metrics { + md := pmetric.NewMetrics() for _, rm := range rms { rm.CopyTo(md.ResourceMetrics().AppendEmpty()) } return md } -func sortMetrics(tds []pdata.Metrics, attrKey string) { +func sortMetrics(tds []pmetric.Metrics, attrKey string) { sort.Slice(tds, func(i, j int) bool { valI := "" if av, ok := tds[i].ResourceMetrics().At(0).Resource().Attributes().Get(attrKey); ok { @@ -270,27 +273,27 @@ func sortMetrics(tds []pdata.Metrics, attrKey string) { }) } -func fillResourceMetrics(rs pdata.ResourceMetrics, key string, val pdata.Value) { +func fillResourceMetrics(rs pmetric.ResourceMetrics, key string, val pcommon.Value) { rs.Resource().Attributes().Upsert(key, val) - rs.Resource().Attributes().Upsert("__other_key__", pdata.NewValueInt(123)) + rs.Resource().Attributes().Upsert("__other_key__", pcommon.NewValueInt(123)) ils := rs.ScopeMetrics().AppendEmpty() firstMetric := ils.Metrics().AppendEmpty() firstMetric.SetName("first-metric") - firstMetric.SetDataType(pdata.MetricDataType(rand.Int() % 4)) + firstMetric.SetDataType(pmetric.MetricDataType(rand.Int() % 4)) secondMetric := ils.Metrics().AppendEmpty() secondMetric.SetName("second-metric") - secondMetric.SetDataType(pdata.MetricDataType(rand.Int() % 4)) + secondMetric.SetDataType(pmetric.MetricDataType(rand.Int() % 4)) } -func newLogs(rls ...pdata.ResourceLogs) pdata.Logs { - ld := pdata.NewLogs() +func newLogs(rls ...plog.ResourceLogs) plog.Logs { + ld := plog.NewLogs() for _, rl := range rls { rl.CopyTo(ld.ResourceLogs().AppendEmpty()) } return ld } -func sortLogs(tds []pdata.Logs, attrKey string) { +func sortLogs(tds []plog.Logs, attrKey string) { sort.Slice(tds, func(i, j int) bool { valI := "" if av, ok := tds[i].ResourceLogs().At(0).Resource().Attributes().Get(attrKey); ok { @@ -304,9 +307,9 @@ func sortLogs(tds []pdata.Logs, attrKey string) { }) } -func fillResourceLogs(rs pdata.ResourceLogs, key string, val pdata.Value) { +func fillResourceLogs(rs plog.ResourceLogs, key string, val pcommon.Value) { rs.Resource().Attributes().Upsert(key, val) - rs.Resource().Attributes().Upsert("__other_key__", pdata.NewValueInt(123)) + rs.Resource().Attributes().Upsert("__other_key__", pcommon.NewValueInt(123)) ils 
:= rs.ScopeLogs().AppendEmpty() firstLogRecord := ils.LogRecords().AppendEmpty() firstLogRecord.SetName("first-log-record") @@ -317,11 +320,11 @@ func fillResourceLogs(rs pdata.ResourceLogs, key string, val pdata.Value) { } func BenchmarkBatchPerResourceTraces(b *testing.B) { - inBatch := pdata.NewTraces() + inBatch := ptrace.NewTraces() rss := inBatch.ResourceSpans() rss.EnsureCapacity(64) for i := 0; i < 64; i++ { - fillResourceSpans(rss.AppendEmpty(), "attr_key", pdata.NewValueString(strconv.Itoa(i%8))) + fillResourceSpans(rss.AppendEmpty(), "attr_key", pcommon.NewValueString(strconv.Itoa(i%8))) } bpr := NewBatchPerResourceTraces("attr_key", consumertest.NewNop()) b.ReportAllocs() @@ -334,10 +337,10 @@ func BenchmarkBatchPerResourceTraces(b *testing.B) { } func BenchmarkBatchPerResourceMetrics(b *testing.B) { - inBatch := pdata.NewMetrics() + inBatch := pmetric.NewMetrics() inBatch.ResourceMetrics().EnsureCapacity(64) for i := 0; i < 64; i++ { - fillResourceMetrics(inBatch.ResourceMetrics().AppendEmpty(), "attr_key", pdata.NewValueString(strconv.Itoa(i%8))) + fillResourceMetrics(inBatch.ResourceMetrics().AppendEmpty(), "attr_key", pcommon.NewValueString(strconv.Itoa(i%8))) } bpr := NewBatchPerResourceMetrics("attr_key", consumertest.NewNop()) b.ReportAllocs() @@ -350,10 +353,10 @@ func BenchmarkBatchPerResourceMetrics(b *testing.B) { } func BenchmarkBatchPerResourceLogs(b *testing.B) { - inBatch := pdata.NewLogs() + inBatch := plog.NewLogs() inBatch.ResourceLogs().EnsureCapacity(64) for i := 0; i < 64; i++ { - fillResourceLogs(inBatch.ResourceLogs().AppendEmpty(), "attr_key", pdata.NewValueString(strconv.Itoa(i%8))) + fillResourceLogs(inBatch.ResourceLogs().AppendEmpty(), "attr_key", pcommon.NewValueString(strconv.Itoa(i%8))) } bpr := NewBatchPerResourceLogs("attr_key", consumertest.NewNop()) b.ReportAllocs() diff --git a/pkg/batchperresourceattr/go.mod b/pkg/batchperresourceattr/go.mod index c40ae3ce828d..b2a7fde8b944 100644 --- a/pkg/batchperresourceattr/go.mod +++ b/pkg/batchperresourceattr/go.mod @@ -4,8 +4,8 @@ go 1.17 require ( github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/multierr v1.8.0 ) @@ -18,3 +18,5 @@ require ( gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/pkg/batchperresourceattr/go.sum b/pkg/batchperresourceattr/go.sum index 47b2cc1a0159..8d0f35a42d1b 100644 --- a/pkg/batchperresourceattr/go.sum +++ b/pkg/batchperresourceattr/go.sum @@ -25,10 +25,10 @@ github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMT github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= 
-go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= diff --git a/pkg/batchpersignal/batchpersignal.go b/pkg/batchpersignal/batchpersignal.go index 091c422a081c..0ff4a41bc3f5 100644 --- a/pkg/batchpersignal/batchpersignal.go +++ b/pkg/batchpersignal/batchpersignal.go @@ -14,20 +14,24 @@ package batchpersignal // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal" -import "go.opentelemetry.io/collector/model/pdata" - -// SplitTraces returns one pdata.Traces for each trace in the given pdata.Traces input. Each of the resulting pdata.Traces contains exactly one trace. -func SplitTraces(batch pdata.Traces) []pdata.Traces { +import ( + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/ptrace" +) + +// SplitTraces returns one ptrace.Traces for each trace in the given ptrace.Traces input. Each of the resulting ptrace.Traces contains exactly one trace. +func SplitTraces(batch ptrace.Traces) []ptrace.Traces { // for each span in the resource spans, we group them into batches of rs/ils/traceID. // if the same traceID exists in different ils, they land in different batches. - var result []pdata.Traces + var result []ptrace.Traces for i := 0; i < batch.ResourceSpans().Len(); i++ { rs := batch.ResourceSpans().At(i) for j := 0; j < rs.ScopeSpans().Len(); j++ { // the batches for this ILS - batches := map[pdata.TraceID]pdata.ResourceSpans{} + batches := map[pcommon.TraceID]ptrace.ResourceSpans{} ils := rs.ScopeSpans().At(j) for k := 0; k < ils.Spans().Len(); k++ { @@ -37,7 +41,7 @@ func SplitTraces(batch pdata.Traces) []pdata.Traces { // for the first traceID in the ILS, initialize the map entry // and add the singleTraceBatch to the result list if _, ok := batches[key]; !ok { - trace := pdata.NewTraces() + trace := ptrace.NewTraces() newRS := trace.ResourceSpans().AppendEmpty() // currently, the ResourceSpans implementation has only a Resource and an ILS. We'll copy the Resource // and set our own ILS @@ -62,18 +66,18 @@ func SplitTraces(batch pdata.Traces) []pdata.Traces { return result } -// SplitLogs returns one pdata.Logs for each trace in the given pdata.Logs input. Each of the resulting pdata.Logs contains exactly one trace. -func SplitLogs(batch pdata.Logs) []pdata.Logs { +// SplitLogs returns one plog.Logs for each trace in the given plog.Logs input. Each of the resulting plog.Logs contains exactly one trace. +func SplitLogs(batch plog.Logs) []plog.Logs { // for each log in the resource logs, we group them into batches of rl/sl/traceID. // if the same traceID exists in different sl, they land in different batches. 
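// Illustrative aside (not part of the patch): SplitTraces and SplitLogs in the
// surrounding hunks now take and return the ptrace/plog types. A minimal sketch of
// splitting two trace IDs into two batches, mirroring the tests that follow; only
// identifiers shown in this diff are used:
package main

import (
	"fmt"

	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal"
	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/ptrace"
)

func main() {
	td := ptrace.NewTraces()
	ils := td.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty()

	first := ils.Spans().AppendEmpty()
	first.SetTraceID(pcommon.NewTraceID([16]byte{1, 2, 3, 4}))
	second := ils.Spans().AppendEmpty()
	second.SetTraceID(pcommon.NewTraceID([16]byte{2, 3, 4, 5}))

	fmt.Println(len(batchpersignal.SplitTraces(td))) // 2: one ptrace.Traces per trace ID
}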
- var result []pdata.Logs + var result []plog.Logs for i := 0; i < batch.ResourceLogs().Len(); i++ { rs := batch.ResourceLogs().At(i) for j := 0; j < rs.ScopeLogs().Len(); j++ { // the batches for this ILL - batches := map[pdata.TraceID]pdata.ResourceLogs{} + batches := map[pcommon.TraceID]plog.ResourceLogs{} sl := rs.ScopeLogs().At(j) for k := 0; k < sl.LogRecords().Len(); k++ { @@ -83,7 +87,7 @@ func SplitLogs(batch pdata.Logs) []pdata.Logs { // for the first traceID in the ILL, initialize the map entry // and add the singleTraceBatch to the result list if _, ok := batches[key]; !ok { - logs := pdata.NewLogs() + logs := plog.NewLogs() newRL := logs.ResourceLogs().AppendEmpty() // currently, the ResourceLogs implementation has only a Resource and an ILL. We'll copy the Resource // and set our own ILL diff --git a/pkg/batchpersignal/batchpersignal_test.go b/pkg/batchpersignal/batchpersignal_test.go index 48b58653657f..1b23103905da 100644 --- a/pkg/batchpersignal/batchpersignal_test.go +++ b/pkg/batchpersignal/batchpersignal_test.go @@ -18,12 +18,14 @@ import ( "testing" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/ptrace" ) func TestSplitDifferentTracesIntoDifferentBatches(t *testing.T) { // we have 1 ResourceSpans with 1 ILS and two traceIDs, resulting in two batches - inBatch := pdata.NewTraces() + inBatch := ptrace.NewTraces() rs := inBatch.ResourceSpans().AppendEmpty() // the first ILS has two spans @@ -32,10 +34,10 @@ func TestSplitDifferentTracesIntoDifferentBatches(t *testing.T) { library.SetName("first-library") firstSpan := ils.Spans().AppendEmpty() firstSpan.SetName("first-batch-first-span") - firstSpan.SetTraceID(pdata.NewTraceID([16]byte{1, 2, 3, 4})) + firstSpan.SetTraceID(pcommon.NewTraceID([16]byte{1, 2, 3, 4})) secondSpan := ils.Spans().AppendEmpty() secondSpan.SetName("first-batch-second-span") - secondSpan.SetTraceID(pdata.NewTraceID([16]byte{2, 3, 4, 5})) + secondSpan.SetTraceID(pcommon.NewTraceID([16]byte{2, 3, 4, 5})) // test out := SplitTraces(inBatch) @@ -56,23 +58,23 @@ func TestSplitDifferentTracesIntoDifferentBatches(t *testing.T) { func TestSplitTracesWithNilTraceID(t *testing.T) { // prepare - inBatch := pdata.NewTraces() + inBatch := ptrace.NewTraces() rs := inBatch.ResourceSpans().AppendEmpty() ils := rs.ScopeSpans().AppendEmpty() firstSpan := ils.Spans().AppendEmpty() - firstSpan.SetTraceID(pdata.NewTraceID([16]byte{})) + firstSpan.SetTraceID(pcommon.NewTraceID([16]byte{})) // test batches := SplitTraces(inBatch) // verify assert.Len(t, batches, 1) - assert.Equal(t, pdata.NewTraceID([16]byte{}), batches[0].ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).TraceID()) + assert.Equal(t, pcommon.NewTraceID([16]byte{}), batches[0].ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).TraceID()) } func TestSplitSameTraceIntoDifferentBatches(t *testing.T) { // prepare - inBatch := pdata.NewTraces() + inBatch := ptrace.NewTraces() rs := inBatch.ResourceSpans().AppendEmpty() // we have 1 ResourceSpans with 2 ILS, resulting in two batches @@ -85,10 +87,10 @@ func TestSplitSameTraceIntoDifferentBatches(t *testing.T) { firstILS.Spans().EnsureCapacity(2) firstSpan := firstILS.Spans().AppendEmpty() firstSpan.SetName("first-batch-first-span") - firstSpan.SetTraceID(pdata.NewTraceID([16]byte{1, 2, 3, 4})) + firstSpan.SetTraceID(pcommon.NewTraceID([16]byte{1, 2, 3, 4})) secondSpan := 
firstILS.Spans().AppendEmpty() secondSpan.SetName("first-batch-second-span") - secondSpan.SetTraceID(pdata.NewTraceID([16]byte{1, 2, 3, 4})) + secondSpan.SetTraceID(pcommon.NewTraceID([16]byte{1, 2, 3, 4})) // the second ILS has one span secondILS := rs.ScopeSpans().AppendEmpty() @@ -96,7 +98,7 @@ func TestSplitSameTraceIntoDifferentBatches(t *testing.T) { secondLibrary.SetName("second-library") thirdSpan := secondILS.Spans().AppendEmpty() thirdSpan.SetName("second-batch-first-span") - thirdSpan.SetTraceID(pdata.NewTraceID([16]byte{1, 2, 3, 4})) + thirdSpan.SetTraceID(pcommon.NewTraceID([16]byte{1, 2, 3, 4})) // test batches := SplitTraces(inBatch) @@ -105,20 +107,20 @@ func TestSplitSameTraceIntoDifferentBatches(t *testing.T) { assert.Len(t, batches, 2) // first batch - assert.Equal(t, pdata.NewTraceID([16]byte{1, 2, 3, 4}), batches[0].ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).TraceID()) + assert.Equal(t, pcommon.NewTraceID([16]byte{1, 2, 3, 4}), batches[0].ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).TraceID()) assert.Equal(t, firstLibrary.Name(), batches[0].ResourceSpans().At(0).ScopeSpans().At(0).Scope().Name()) assert.Equal(t, firstSpan.Name(), batches[0].ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Name()) assert.Equal(t, secondSpan.Name(), batches[0].ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(1).Name()) // second batch - assert.Equal(t, pdata.NewTraceID([16]byte{1, 2, 3, 4}), batches[1].ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).TraceID()) + assert.Equal(t, pcommon.NewTraceID([16]byte{1, 2, 3, 4}), batches[1].ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).TraceID()) assert.Equal(t, secondLibrary.Name(), batches[1].ResourceSpans().At(0).ScopeSpans().At(0).Scope().Name()) assert.Equal(t, thirdSpan.Name(), batches[1].ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Name()) } func TestSplitDifferentLogsIntoDifferentBatches(t *testing.T) { // we have 1 ResourceLogs with 1 ILL and three traceIDs (one null) resulting in three batches - inBatch := pdata.NewLogs() + inBatch := plog.NewLogs() rl := inBatch.ResourceLogs().AppendEmpty() // the first ILL has three logs @@ -128,10 +130,10 @@ func TestSplitDifferentLogsIntoDifferentBatches(t *testing.T) { sl.LogRecords().EnsureCapacity(3) firstLog := sl.LogRecords().AppendEmpty() firstLog.SetName("first-batch-first-log") - firstLog.SetTraceID(pdata.NewTraceID([16]byte{1, 2, 3, 4})) + firstLog.SetTraceID(pcommon.NewTraceID([16]byte{1, 2, 3, 4})) secondLog := sl.LogRecords().AppendEmpty() secondLog.SetName("first-batch-second-log") - secondLog.SetTraceID(pdata.NewTraceID([16]byte{2, 3, 4, 5})) + secondLog.SetTraceID(pcommon.NewTraceID([16]byte{2, 3, 4, 5})) thirdLog := sl.LogRecords().AppendEmpty() thirdLog.SetName("first-batch-third-log") // do not set traceID for third log @@ -160,23 +162,23 @@ func TestSplitDifferentLogsIntoDifferentBatches(t *testing.T) { func TestSplitLogsWithNilTraceID(t *testing.T) { // prepare - inBatch := pdata.NewLogs() + inBatch := plog.NewLogs() rl := inBatch.ResourceLogs().AppendEmpty() sl := rl.ScopeLogs().AppendEmpty() firstLog := sl.LogRecords().AppendEmpty() - firstLog.SetTraceID(pdata.NewTraceID([16]byte{})) + firstLog.SetTraceID(pcommon.NewTraceID([16]byte{})) // test batches := SplitLogs(inBatch) // verify assert.Len(t, batches, 1) - assert.Equal(t, pdata.NewTraceID([16]byte{}), batches[0].ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).TraceID()) + assert.Equal(t, pcommon.NewTraceID([16]byte{}), 
batches[0].ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).TraceID()) } func TestSplitLogsSameTraceIntoDifferentBatches(t *testing.T) { // prepare - inBatch := pdata.NewLogs() + inBatch := plog.NewLogs() rl := inBatch.ResourceLogs().AppendEmpty() // we have 1 ResourceLogs with 2 ILL, resulting in two batches @@ -189,10 +191,10 @@ func TestSplitLogsSameTraceIntoDifferentBatches(t *testing.T) { firstILS.LogRecords().EnsureCapacity(2) firstLog := firstILS.LogRecords().AppendEmpty() firstLog.SetName("first-batch-first-log") - firstLog.SetTraceID(pdata.NewTraceID([16]byte{1, 2, 3, 4})) + firstLog.SetTraceID(pcommon.NewTraceID([16]byte{1, 2, 3, 4})) secondLog := firstILS.LogRecords().AppendEmpty() secondLog.SetName("first-batch-second-log") - secondLog.SetTraceID(pdata.NewTraceID([16]byte{1, 2, 3, 4})) + secondLog.SetTraceID(pcommon.NewTraceID([16]byte{1, 2, 3, 4})) // the second ILL has one log secondILS := rl.ScopeLogs().AppendEmpty() @@ -200,7 +202,7 @@ func TestSplitLogsSameTraceIntoDifferentBatches(t *testing.T) { secondLibrary.SetName("second-library") thirdLog := secondILS.LogRecords().AppendEmpty() thirdLog.SetName("second-batch-first-log") - thirdLog.SetTraceID(pdata.NewTraceID([16]byte{1, 2, 3, 4})) + thirdLog.SetTraceID(pcommon.NewTraceID([16]byte{1, 2, 3, 4})) // test batches := SplitLogs(inBatch) @@ -209,13 +211,13 @@ func TestSplitLogsSameTraceIntoDifferentBatches(t *testing.T) { assert.Len(t, batches, 2) // first batch - assert.Equal(t, pdata.NewTraceID([16]byte{1, 2, 3, 4}), batches[0].ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).TraceID()) + assert.Equal(t, pcommon.NewTraceID([16]byte{1, 2, 3, 4}), batches[0].ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).TraceID()) assert.Equal(t, firstLibrary.Name(), batches[0].ResourceLogs().At(0).ScopeLogs().At(0).Scope().Name()) assert.Equal(t, firstLog.Name(), batches[0].ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Name()) assert.Equal(t, secondLog.Name(), batches[0].ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(1).Name()) // second batch - assert.Equal(t, pdata.NewTraceID([16]byte{1, 2, 3, 4}), batches[1].ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).TraceID()) + assert.Equal(t, pcommon.NewTraceID([16]byte{1, 2, 3, 4}), batches[1].ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).TraceID()) assert.Equal(t, secondLibrary.Name(), batches[1].ResourceLogs().At(0).ScopeLogs().At(0).Scope().Name()) assert.Equal(t, thirdLog.Name(), batches[1].ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Name()) } diff --git a/pkg/batchpersignal/go.mod b/pkg/batchpersignal/go.mod index 7fd69d2f6122..18cfadf73e34 100644 --- a/pkg/batchpersignal/go.mod +++ b/pkg/batchpersignal/go.mod @@ -4,7 +4,7 @@ go 1.17 require ( github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 ) require ( @@ -13,3 +13,5 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/pkg/batchpersignal/go.sum b/pkg/batchpersignal/go.sum index 704834f82be1..9425169d4edc 100644 --- a/pkg/batchpersignal/go.sum +++ b/pkg/batchpersignal/go.sum @@ -12,8 +12,8 @@ github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMT github.com/stretchr/testify v1.7.1/go.mod 
h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= diff --git a/pkg/experimentalmetricmetadata/go.mod b/pkg/experimentalmetricmetadata/go.mod index 0e730907975a..cfa57efd3e7a 100644 --- a/pkg/experimentalmetricmetadata/go.mod +++ b/pkg/experimentalmetricmetadata/go.mod @@ -11,3 +11,5 @@ require ( gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/pkg/resourcetotelemetry/go.mod b/pkg/resourcetotelemetry/go.mod index dd42bda38f73..73cc609839c4 100644 --- a/pkg/resourcetotelemetry/go.mod +++ b/pkg/resourcetotelemetry/go.mod @@ -5,21 +5,20 @@ go 1.17 require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.48.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 ) require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/text v0.2.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rogpeppe/go-internal v1.6.1 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect @@ -30,3 +29,5 @@ require ( ) replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal => ../../internal/coreinternal + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/pkg/resourcetotelemetry/go.sum b/pkg/resourcetotelemetry/go.sum index 9c842bb2824b..25c5fe861e79 100644 --- a/pkg/resourcetotelemetry/go.sum +++ b/pkg/resourcetotelemetry/go.sum @@ -69,8 +69,8 @@ github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= 
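// Illustrative aside (not part of the patch): the resource_to_telemetry.go change
// further below copies resource attributes onto every datapoint via pcommon.Map.
// A minimal sketch of the same Range/Upsert join used by joinAttributeMaps; the
// attribute keys here are made up for the example:
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pcommon"
)

func main() {
	from := pcommon.NewMap()
	from.InsertString("service.name", "checkout")

	to := pcommon.NewMap()
	to.InsertString("http.status_code", "200")

	// joinAttributeMaps upserts every source attribute into the destination map.
	from.Range(func(k string, v pcommon.Value) bool {
		to.Upsert(k, v)
		return true
	})
	fmt.Println(to.Len()) // 2
}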
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -110,8 +110,6 @@ github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBO github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -123,10 +121,10 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= @@ -184,7 +182,7 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= diff --git a/pkg/resourcetotelemetry/resource_to_telemetry.go b/pkg/resourcetotelemetry/resource_to_telemetry.go index f203f165ff06..7886c4a379b9 100644 --- a/pkg/resourcetotelemetry/resource_to_telemetry.go +++ b/pkg/resourcetotelemetry/resource_to_telemetry.go @@ -19,7 +19,8 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" ) // Settings defines configuration for converting resource attributes to telemetry attributes. @@ -37,7 +38,7 @@ type wrapperMetricsExporter struct { component.MetricsExporter } -func (wme *wrapperMetricsExporter) ConsumeMetrics(ctx context.Context, md pdata.Metrics) error { +func (wme *wrapperMetricsExporter) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error { return wme.MetricsExporter.ConsumeMetrics(ctx, convertToMetricsAttributes(md)) } @@ -55,7 +56,7 @@ func WrapMetricsExporter(set Settings, exporter component.MetricsExporter) compo return &wrapperMetricsExporter{MetricsExporter: exporter} } -func convertToMetricsAttributes(md pdata.Metrics) pdata.Metrics { +func convertToMetricsAttributes(md pmetric.Metrics) pmetric.Metrics { cloneMd := md.Clone() rms := cloneMd.ResourceMetrics() for i := 0; i < rms.Len(); i++ { @@ -75,47 +76,47 @@ func convertToMetricsAttributes(md pdata.Metrics) pdata.Metrics { } // addAttributesToMetric adds additional labels to the given metric -func addAttributesToMetric(metric *pdata.Metric, labelMap pdata.Map) { +func addAttributesToMetric(metric *pmetric.Metric, labelMap pcommon.Map) { switch metric.DataType() { - case pdata.MetricDataTypeGauge: + case pmetric.MetricDataTypeGauge: addAttributesToNumberDataPoints(metric.Gauge().DataPoints(), labelMap) - case pdata.MetricDataTypeSum: + case pmetric.MetricDataTypeSum: addAttributesToNumberDataPoints(metric.Sum().DataPoints(), labelMap) - case pdata.MetricDataTypeHistogram: + case pmetric.MetricDataTypeHistogram: addAttributesToHistogramDataPoints(metric.Histogram().DataPoints(), labelMap) - case pdata.MetricDataTypeSummary: + case pmetric.MetricDataTypeSummary: addAttributesToSummaryDataPoints(metric.Summary().DataPoints(), labelMap) - case pdata.MetricDataTypeExponentialHistogram: + case pmetric.MetricDataTypeExponentialHistogram: addAttributesToExponentialHistogramDataPoints(metric.ExponentialHistogram().DataPoints(), labelMap) } } -func addAttributesToNumberDataPoints(ps pdata.NumberDataPointSlice, newAttributeMap pdata.Map) { +func addAttributesToNumberDataPoints(ps pmetric.NumberDataPointSlice, newAttributeMap pcommon.Map) { for i := 0; i < ps.Len(); i++ { joinAttributeMaps(newAttributeMap, ps.At(i).Attributes()) } } -func addAttributesToHistogramDataPoints(ps pdata.HistogramDataPointSlice, newAttributeMap pdata.Map) { +func addAttributesToHistogramDataPoints(ps pmetric.HistogramDataPointSlice, newAttributeMap 
pcommon.Map) { for i := 0; i < ps.Len(); i++ { joinAttributeMaps(newAttributeMap, ps.At(i).Attributes()) } } -func addAttributesToSummaryDataPoints(ps pdata.SummaryDataPointSlice, newAttributeMap pdata.Map) { +func addAttributesToSummaryDataPoints(ps pmetric.SummaryDataPointSlice, newAttributeMap pcommon.Map) { for i := 0; i < ps.Len(); i++ { joinAttributeMaps(newAttributeMap, ps.At(i).Attributes()) } } -func addAttributesToExponentialHistogramDataPoints(ps pdata.ExponentialHistogramDataPointSlice, newAttributeMap pdata.Map) { +func addAttributesToExponentialHistogramDataPoints(ps pmetric.ExponentialHistogramDataPointSlice, newAttributeMap pcommon.Map) { for i := 0; i < ps.Len(); i++ { joinAttributeMaps(newAttributeMap, ps.At(i).Attributes()) } } -func joinAttributeMaps(from, to pdata.Map) { - from.Range(func(k string, v pdata.Value) bool { +func joinAttributeMaps(from, to pcommon.Map) { + from.Range(func(k string, v pcommon.Value) bool { to.Upsert(k, v) return true }) diff --git a/pkg/translator/jaeger/go.mod b/pkg/translator/jaeger/go.mod index b84c58a82ad0..c430d2de5b7c 100644 --- a/pkg/translator/jaeger/go.mod +++ b/pkg/translator/jaeger/go.mod @@ -6,7 +6,8 @@ require ( github.com/jaegertracing/jaeger v1.32.0 github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.48.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 ) require ( @@ -25,3 +26,5 @@ require ( ) replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal => ../../../internal/coreinternal + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/pkg/translator/jaeger/go.sum b/pkg/translator/jaeger/go.sum index f3576566d10d..0ab947cff908 100644 --- a/pkg/translator/jaeger/go.sum +++ b/pkg/translator/jaeger/go.sum @@ -40,8 +40,10 @@ github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= diff --git a/pkg/translator/jaeger/jaegerproto_to_traces.go b/pkg/translator/jaeger/jaegerproto_to_traces.go index 9c739d2f5bbd..25c3ab574037 100644 --- 
a/pkg/translator/jaeger/jaegerproto_to_traces.go +++ b/pkg/translator/jaeger/jaegerproto_to_traces.go @@ -22,8 +22,9 @@ import ( "strings" "github.com/jaegertracing/jaeger/model" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/idutils" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/occonventions" @@ -33,8 +34,8 @@ import ( var blankJaegerProtoSpan = new(model.Span) // ProtoToTraces converts multiple Jaeger proto batches to internal traces -func ProtoToTraces(batches []*model.Batch) (pdata.Traces, error) { - traceData := pdata.NewTraces() +func ProtoToTraces(batches []*model.Batch) (ptrace.Traces, error) { + traceData := ptrace.NewTraces() if len(batches) == 0 { return traceData, nil } @@ -53,7 +54,7 @@ func ProtoToTraces(batches []*model.Batch) (pdata.Traces, error) { return traceData, nil } -func protoBatchToResourceSpans(batch model.Batch, dest pdata.ResourceSpans) { +func protoBatchToResourceSpans(batch model.Batch, dest ptrace.ResourceSpans) { jSpans := batch.GetSpans() jProcessToInternalResource(batch.GetProcess(), dest.Resource()) @@ -74,7 +75,7 @@ func protoBatchToResourceSpans(batch model.Batch, dest pdata.ResourceSpans) { } } -func jProcessToInternalResource(process *model.Process, dest pdata.Resource) { +func jProcessToInternalResource(process *model.Process, dest pcommon.Resource) { if process == nil || process.ServiceName == tracetranslator.ResourceNoServiceName { return } @@ -101,7 +102,7 @@ func jProcessToInternalResource(process *model.Process, dest pdata.Resource) { } // translateHostnameAttr translates "hostname" atttribute -func translateHostnameAttr(attrs pdata.Map) { +func translateHostnameAttr(attrs pcommon.Map) { hostname, hostnameFound := attrs.Get("hostname") _, convHostNameFound := attrs.Get(conventions.AttributeHostName) if hostnameFound && !convHostNameFound { @@ -111,7 +112,7 @@ func translateHostnameAttr(attrs pdata.Map) { } // translateHostnameAttr translates "jaeger.version" atttribute -func translateJaegerVersionAttr(attrs pdata.Map) { +func translateJaegerVersionAttr(attrs pcommon.Map) { jaegerVersion, jaegerVersionFound := attrs.Get("jaeger.version") _, exporterVersionFound := attrs.Get(occonventions.AttributeExporterVersion) if jaegerVersionFound && !exporterVersionFound { @@ -120,8 +121,8 @@ func translateJaegerVersionAttr(attrs pdata.Map) { } } -func jSpansToInternal(spans []*model.Span) map[instrumentationLibrary]pdata.SpanSlice { - spansByLibrary := make(map[instrumentationLibrary]pdata.SpanSlice) +func jSpansToInternal(spans []*model.Span) map[instrumentationLibrary]ptrace.SpanSlice { + spansByLibrary := make(map[instrumentationLibrary]ptrace.SpanSlice) for _, span := range spans { if span == nil || reflect.DeepEqual(span, blankJaegerProtoSpan) { @@ -136,11 +137,11 @@ type instrumentationLibrary struct { name, version string } -func jSpanToInternal(span *model.Span, spansByLibrary map[instrumentationLibrary]pdata.SpanSlice) { +func jSpanToInternal(span *model.Span, spansByLibrary map[instrumentationLibrary]ptrace.SpanSlice) { il := getInstrumentationLibrary(span) ss, found := spansByLibrary[il] if !found { - ss = pdata.NewSpanSlice() + ss = ptrace.NewSpanSlice() spansByLibrary[il] = ss } @@ -148,8 +149,8 @@ func jSpanToInternal(span *model.Span, 
spansByLibrary map[instrumentationLibrary dest.SetTraceID(idutils.UInt64ToTraceID(span.TraceID.High, span.TraceID.Low)) dest.SetSpanID(idutils.UInt64ToSpanID(uint64(span.SpanID))) dest.SetName(span.OperationName) - dest.SetStartTimestamp(pdata.NewTimestampFromTime(span.StartTime)) - dest.SetEndTimestamp(pdata.NewTimestampFromTime(span.StartTime.Add(span.Duration))) + dest.SetStartTimestamp(pcommon.NewTimestampFromTime(span.StartTime)) + dest.SetEndTimestamp(pcommon.NewTimestampFromTime(span.StartTime.Add(span.Duration))) parentSpanID := span.ParentSpanID() if parentSpanID != model.SpanID(0) { @@ -176,7 +177,7 @@ func jSpanToInternal(span *model.Span, spansByLibrary map[instrumentationLibrary jReferencesToSpanLinks(span.References, parentSpanID, dest.Links()) } -func jTagsToInternalAttributes(tags []model.KeyValue, dest pdata.Map) { +func jTagsToInternalAttributes(tags []model.KeyValue, dest pcommon.Map) { for _, tag := range tags { switch tag.GetVType() { case model.ValueType_STRING: @@ -195,14 +196,14 @@ func jTagsToInternalAttributes(tags []model.KeyValue, dest pdata.Map) { } } -func setInternalSpanStatus(attrs pdata.Map, dest pdata.SpanStatus) { - statusCode := pdata.StatusCodeUnset +func setInternalSpanStatus(attrs pcommon.Map, dest ptrace.SpanStatus) { + statusCode := ptrace.StatusCodeUnset statusMessage := "" statusExists := false if errorVal, ok := attrs.Get(tracetranslator.TagError); ok { if errorVal.BoolVal() { - statusCode = pdata.StatusCodeError + statusCode = ptrace.StatusCodeError attrs.Delete(tracetranslator.TagError) statusExists = true @@ -222,9 +223,9 @@ func setInternalSpanStatus(attrs pdata.Map, dest pdata.SpanStatus) { statusExists = true switch strings.ToUpper(codeAttr.StringVal()) { case statusOk: - statusCode = pdata.StatusCodeOk + statusCode = ptrace.StatusCodeOk case statusError: - statusCode = pdata.StatusCodeError + statusCode = ptrace.StatusCodeError } if desc, ok := extractStatusDescFromAttr(attrs); ok { @@ -240,7 +241,7 @@ func setInternalSpanStatus(attrs pdata.Map, dest pdata.SpanStatus) { // request or response, but again, only do so if the `error` tag was // not set to true and no explicit status was sent. if code, err := getStatusCodeFromHTTPStatusAttr(httpCodeAttr); err == nil { - if code != pdata.StatusCodeUnset { + if code != ptrace.StatusCodeUnset { statusExists = true statusCode = code } @@ -261,7 +262,7 @@ func setInternalSpanStatus(attrs pdata.Map, dest pdata.SpanStatus) { // along with true if it is set. Otherwise, an empty string and false are // returned. The OTel status description attribute is deleted from attrs in // the process. -func extractStatusDescFromAttr(attrs pdata.Map) (string, bool) { +func extractStatusDescFromAttr(attrs pcommon.Map) (string, bool) { if msgAttr, ok := attrs.Get(conventions.OtelStatusDescription); ok { msg := msgAttr.StringVal() attrs.Delete(conventions.OtelStatusDescription) @@ -273,12 +274,12 @@ func extractStatusDescFromAttr(attrs pdata.Map) (string, bool) { // codeFromAttr returns the integer code value from attrVal. An error is // returned if the code is not represented by an integer or string value in // the attrVal or the value is outside the bounds of an int representation. 
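// Editorial sketch, not part of this change set: the attribute-value handling that
// codeFromAttr (below) migrates from pdata.Value to pcommon.Value, shown as a
// standalone example against the same v0.48-era pdata packages this PR pins.
// Identifiers such as statusCodeFromValue are illustrative only.
package main

import (
	"fmt"
	"strconv"

	"go.opentelemetry.io/collector/pdata/pcommon"
)

// statusCodeFromValue mirrors the codeFromAttr idea: integer values pass
// through, string values are parsed, anything else is rejected.
func statusCodeFromValue(v pcommon.Value) (int64, error) {
	switch v.Type() {
	case pcommon.ValueTypeInt:
		return v.IntVal(), nil
	case pcommon.ValueTypeString:
		return strconv.ParseInt(v.StringVal(), 10, 0)
	default:
		return 0, fmt.Errorf("unsupported attribute type %v", v.Type())
	}
}

func main() {
	// pcommon constructors replace the old pdata.NewValue* helpers.
	fmt.Println(statusCodeFromValue(pcommon.NewValueString("404"))) // 404 <nil>
	fmt.Println(statusCodeFromValue(pcommon.NewValueInt(500)))      // 500 <nil>
}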
-func codeFromAttr(attrVal pdata.Value) (int64, error) { +func codeFromAttr(attrVal pcommon.Value) (int64, error) { var val int64 switch attrVal.Type() { - case pdata.ValueTypeInt: + case pcommon.ValueTypeInt: val = attrVal.IntVal() - case pdata.ValueTypeString: + case pcommon.ValueTypeString: var err error val, err = strconv.ParseInt(attrVal.StringVal(), 10, 0) if err != nil { @@ -290,32 +291,32 @@ func codeFromAttr(attrVal pdata.Value) (int64, error) { return val, nil } -func getStatusCodeFromHTTPStatusAttr(attrVal pdata.Value) (pdata.StatusCode, error) { +func getStatusCodeFromHTTPStatusAttr(attrVal pcommon.Value) (ptrace.StatusCode, error) { statusCode, err := codeFromAttr(attrVal) if err != nil { - return pdata.StatusCodeUnset, err + return ptrace.StatusCodeUnset, err } return tracetranslator.StatusCodeFromHTTP(statusCode), nil } -func jSpanKindToInternal(spanKind string) pdata.SpanKind { +func jSpanKindToInternal(spanKind string) ptrace.SpanKind { switch spanKind { case "client": - return pdata.SpanKindClient + return ptrace.SpanKindClient case "server": - return pdata.SpanKindServer + return ptrace.SpanKindServer case "producer": - return pdata.SpanKindProducer + return ptrace.SpanKindProducer case "consumer": - return pdata.SpanKindConsumer + return ptrace.SpanKindConsumer case "internal": - return pdata.SpanKindInternal + return ptrace.SpanKindInternal } - return pdata.SpanKindUnspecified + return ptrace.SpanKindUnspecified } -func jLogsToSpanEvents(logs []model.Log, dest pdata.SpanEventSlice) { +func jLogsToSpanEvents(logs []model.Log, dest ptrace.SpanEventSlice) { if len(logs) == 0 { return } @@ -323,14 +324,14 @@ func jLogsToSpanEvents(logs []model.Log, dest pdata.SpanEventSlice) { dest.EnsureCapacity(len(logs)) for i, log := range logs { - var event pdata.SpanEvent + var event ptrace.SpanEvent if dest.Len() > i { event = dest.At(i) } else { event = dest.AppendEmpty() } - event.SetTimestamp(pdata.NewTimestampFromTime(log.Timestamp)) + event.SetTimestamp(pcommon.NewTimestampFromTime(log.Timestamp)) if len(log.Fields) == 0 { continue } @@ -347,7 +348,7 @@ func jLogsToSpanEvents(logs []model.Log, dest pdata.SpanEventSlice) { } // jReferencesToSpanLinks sets internal span links based on jaeger span references skipping excludeParentID -func jReferencesToSpanLinks(refs []model.SpanRef, excludeParentID model.SpanID, dest pdata.SpanLinkSlice) { +func jReferencesToSpanLinks(refs []model.SpanRef, excludeParentID model.SpanID, dest ptrace.SpanLinkSlice) { if len(refs) == 0 || len(refs) == 1 && refs[0].SpanID == excludeParentID && refs[0].RefType == model.ChildOf { return } @@ -364,11 +365,11 @@ func jReferencesToSpanLinks(refs []model.SpanRef, excludeParentID model.SpanID, } } -func getTraceStateFromAttrs(attrs pdata.Map) pdata.TraceState { - traceState := pdata.TraceStateEmpty +func getTraceStateFromAttrs(attrs pcommon.Map) ptrace.TraceState { + traceState := ptrace.TraceStateEmpty // TODO Bring this inline with solution for jaegertracing/jaeger-client-java #702 once available if attr, ok := attrs.Get(tracetranslator.TagW3CTraceState); ok { - traceState = pdata.TraceState(attr.StringVal()) + traceState = ptrace.TraceState(attr.StringVal()) attrs.Delete(tracetranslator.TagW3CTraceState) } return traceState diff --git a/pkg/translator/jaeger/jaegerproto_to_traces_test.go b/pkg/translator/jaeger/jaegerproto_to_traces_test.go index 6b9f15b5345b..ec4f49e8b4e0 100644 --- a/pkg/translator/jaeger/jaegerproto_to_traces_test.go +++ b/pkg/translator/jaeger/jaegerproto_to_traces_test.go @@ -23,8 
+23,9 @@ import ( "github.com/jaegertracing/jaeger/model" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/idutils" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/testdata" @@ -34,42 +35,42 @@ import ( // Use timespamp with microsecond granularity to work well with jaeger thrift translation var ( testSpanStartTime = time.Date(2020, 2, 11, 20, 26, 12, 321000, time.UTC) - testSpanStartTimestamp = pdata.NewTimestampFromTime(testSpanStartTime) + testSpanStartTimestamp = pcommon.NewTimestampFromTime(testSpanStartTime) testSpanEventTime = time.Date(2020, 2, 11, 20, 26, 13, 123000, time.UTC) - testSpanEventTimestamp = pdata.NewTimestampFromTime(testSpanEventTime) + testSpanEventTimestamp = pcommon.NewTimestampFromTime(testSpanEventTime) testSpanEndTime = time.Date(2020, 2, 11, 20, 26, 13, 789000, time.UTC) - testSpanEndTimestamp = pdata.NewTimestampFromTime(testSpanEndTime) + testSpanEndTimestamp = pcommon.NewTimestampFromTime(testSpanEndTime) ) func TestCodeFromAttr(t *testing.T) { tests := []struct { name string - attr pdata.Value + attr pcommon.Value code int64 err error }{ { name: "ok-string", - attr: pdata.NewValueString("0"), + attr: pcommon.NewValueString("0"), code: 0, }, { name: "ok-int", - attr: pdata.NewValueInt(1), + attr: pcommon.NewValueInt(1), code: 1, }, { name: "wrong-type", - attr: pdata.NewValueBool(true), + attr: pcommon.NewValueBool(true), code: 0, err: errType, }, { name: "invalid-string", - attr: pdata.NewValueString("inf"), + attr: pcommon.NewValueString("inf"), code: 0, err: strconv.ErrSyntax, }, @@ -91,36 +92,36 @@ func TestCodeFromAttr(t *testing.T) { func TestGetStatusCodeFromHTTPStatusAttr(t *testing.T) { tests := []struct { name string - attr pdata.Value - code pdata.StatusCode + attr pcommon.Value + code ptrace.StatusCode }{ { name: "string-unknown", - attr: pdata.NewValueString("10"), - code: pdata.StatusCodeError, + attr: pcommon.NewValueString("10"), + code: ptrace.StatusCodeError, }, { name: "string-ok", - attr: pdata.NewValueString("101"), - code: pdata.StatusCodeUnset, + attr: pcommon.NewValueString("101"), + code: ptrace.StatusCodeUnset, }, { name: "int-not-found", - attr: pdata.NewValueInt(404), - code: pdata.StatusCodeError, + attr: pcommon.NewValueInt(404), + code: ptrace.StatusCodeError, }, { name: "int-invalid-arg", - attr: pdata.NewValueInt(408), - code: pdata.StatusCodeError, + attr: pcommon.NewValueInt(408), + code: ptrace.StatusCodeError, }, { name: "int-internal", - attr: pdata.NewValueInt(500), - code: pdata.StatusCodeError, + attr: pcommon.NewValueInt(500), + code: ptrace.StatusCodeError, }, } @@ -162,14 +163,14 @@ func TestJTagsToInternalAttributes(t *testing.T) { }, } - expected := pdata.NewMap() + expected := pcommon.NewMap() expected.InsertBool("bool-val", true) expected.InsertInt("int-val", 123) expected.InsertString("string-val", "abc") expected.InsertDouble("double-val", 1.23) expected.InsertString("binary-val", "AAAAAABkfZg=") - got := pdata.NewMap() + got := pcommon.NewMap() jTagsToInternalAttributes(tags, got) require.EqualValues(t, expected, got) @@ -180,12 +181,12 @@ func TestProtoToTraces(t *testing.T) { tests := []struct { name string jb []*model.Batch - 
td pdata.Traces + td ptrace.Traces }{ { name: "empty", jb: []*model.Batch{}, - td: pdata.NewTraces(), + td: ptrace.NewTraces(), }, { @@ -325,37 +326,37 @@ func TestProtoBatchToInternalTracesWithTwoLibraries(t *testing.T) { func TestSetInternalSpanStatus(t *testing.T) { - emptyStatus := pdata.NewSpanStatus() + emptyStatus := ptrace.NewSpanStatus() - okStatus := pdata.NewSpanStatus() - okStatus.SetCode(pdata.StatusCodeOk) + okStatus := ptrace.NewSpanStatus() + okStatus.SetCode(ptrace.StatusCodeOk) - errorStatus := pdata.NewSpanStatus() - errorStatus.SetCode(pdata.StatusCodeError) + errorStatus := ptrace.NewSpanStatus() + errorStatus.SetCode(ptrace.StatusCodeError) - errorStatusWithMessage := pdata.NewSpanStatus() - errorStatusWithMessage.SetCode(pdata.StatusCodeError) + errorStatusWithMessage := ptrace.NewSpanStatus() + errorStatusWithMessage.SetCode(ptrace.StatusCodeError) errorStatusWithMessage.SetMessage("Error: Invalid argument") - errorStatusWith404Message := pdata.NewSpanStatus() - errorStatusWith404Message.SetCode(pdata.StatusCodeError) + errorStatusWith404Message := ptrace.NewSpanStatus() + errorStatusWith404Message.SetCode(ptrace.StatusCodeError) errorStatusWith404Message.SetMessage("HTTP 404: Not Found") tests := []struct { name string - attrs pdata.Map - status pdata.SpanStatus + attrs pcommon.Map + status ptrace.SpanStatus attrsModifiedLen int // Length of attributes map after dropping converted fields }{ { name: "No tags set -> OK status", - attrs: pdata.NewMap(), + attrs: pcommon.NewMap(), status: emptyStatus, attrsModifiedLen: 0, }, { name: "error tag set -> Error status", - attrs: pdata.NewMapFromRaw(map[string]interface{}{ + attrs: pcommon.NewMapFromRaw(map[string]interface{}{ tracetranslator.TagError: true, }), status: errorStatus, @@ -363,7 +364,7 @@ func TestSetInternalSpanStatus(t *testing.T) { }, { name: "status.code is set as string", - attrs: pdata.NewMapFromRaw(map[string]interface{}{ + attrs: pcommon.NewMapFromRaw(map[string]interface{}{ conventions.OtelStatusCode: statusOk, }), status: okStatus, @@ -371,7 +372,7 @@ func TestSetInternalSpanStatus(t *testing.T) { }, { name: "status.code, status.message and error tags are set", - attrs: pdata.NewMapFromRaw(map[string]interface{}{ + attrs: pcommon.NewMapFromRaw(map[string]interface{}{ tracetranslator.TagError: true, conventions.OtelStatusCode: statusError, conventions.OtelStatusDescription: "Error: Invalid argument", @@ -381,7 +382,7 @@ func TestSetInternalSpanStatus(t *testing.T) { }, { name: "http.status_code tag is set as string", - attrs: pdata.NewMapFromRaw(map[string]interface{}{ + attrs: pcommon.NewMapFromRaw(map[string]interface{}{ conventions.AttributeHTTPStatusCode: "404", }), status: errorStatus, @@ -389,7 +390,7 @@ func TestSetInternalSpanStatus(t *testing.T) { }, { name: "http.status_code, http.status_message and error tags are set", - attrs: pdata.NewMapFromRaw(map[string]interface{}{ + attrs: pcommon.NewMapFromRaw(map[string]interface{}{ tracetranslator.TagError: true, conventions.AttributeHTTPStatusCode: 404, tracetranslator.TagHTTPStatusMsg: "HTTP 404: Not Found", @@ -399,7 +400,7 @@ func TestSetInternalSpanStatus(t *testing.T) { }, { name: "status.code has precedence over http.status_code.", - attrs: pdata.NewMapFromRaw(map[string]interface{}{ + attrs: pcommon.NewMapFromRaw(map[string]interface{}{ conventions.OtelStatusCode: statusOk, conventions.AttributeHTTPStatusCode: 500, tracetranslator.TagHTTPStatusMsg: "Server Error", @@ -409,7 +410,7 @@ func TestSetInternalSpanStatus(t *testing.T) { }, { name: 
"Ignore http.status_code == 200 if error set to true.", - attrs: pdata.NewMapFromRaw(map[string]interface{}{ + attrs: pcommon.NewMapFromRaw(map[string]interface{}{ tracetranslator.TagError: true, conventions.AttributeHTTPStatusCode: 200, }), @@ -420,7 +421,7 @@ func TestSetInternalSpanStatus(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - status := pdata.NewSpanStatus() + status := ptrace.NewSpanStatus() setInternalSpanStatus(test.attrs, status) assert.EqualValues(t, test.status, status) assert.Equal(t, test.attrsModifiedLen, test.attrs.Len()) @@ -463,31 +464,31 @@ func TestProtoBatchesToInternalTraces(t *testing.T) { func TestJSpanKindToInternal(t *testing.T) { tests := []struct { jSpanKind string - otlpSpanKind pdata.SpanKind + otlpSpanKind ptrace.SpanKind }{ { jSpanKind: "client", - otlpSpanKind: pdata.SpanKindClient, + otlpSpanKind: ptrace.SpanKindClient, }, { jSpanKind: "server", - otlpSpanKind: pdata.SpanKindServer, + otlpSpanKind: ptrace.SpanKindServer, }, { jSpanKind: "producer", - otlpSpanKind: pdata.SpanKindProducer, + otlpSpanKind: ptrace.SpanKindProducer, }, { jSpanKind: "consumer", - otlpSpanKind: pdata.SpanKindConsumer, + otlpSpanKind: ptrace.SpanKindConsumer, }, { jSpanKind: "internal", - otlpSpanKind: pdata.SpanKindInternal, + otlpSpanKind: ptrace.SpanKindInternal, }, { jSpanKind: "all-others", - otlpSpanKind: pdata.SpanKindUnspecified, + otlpSpanKind: ptrace.SpanKindUnspecified, }, } @@ -498,7 +499,7 @@ func TestJSpanKindToInternal(t *testing.T) { } } -func generateTracesResourceOnly() pdata.Traces { +func generateTracesResourceOnly() ptrace.Traces { td := testdata.GenerateTracesOneEmptyResourceSpans() rs := td.ResourceSpans().At(0).Resource() rs.Attributes().InsertString(conventions.AttributeServiceName, "service-1") @@ -506,7 +507,7 @@ func generateTracesResourceOnly() pdata.Traces { return td } -func generateTracesResourceOnlyWithNoAttrs() pdata.Traces { +func generateTracesResourceOnlyWithNoAttrs() ptrace.Traces { return testdata.GenerateTracesOneEmptyResourceSpans() } @@ -523,17 +524,17 @@ func generateProtoProcess() *model.Process { } } -func generateTracesOneSpanNoResource() pdata.Traces { +func generateTracesOneSpanNoResource() ptrace.Traces { td := testdata.GenerateTracesOneSpanNoResource() span := td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0) - span.SetSpanID(pdata.NewSpanID([8]byte{0xAF, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9, 0xA8})) - span.SetTraceID(pdata.NewTraceID( + span.SetSpanID(pcommon.NewSpanID([8]byte{0xAF, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9, 0xA8})) + span.SetTraceID(pcommon.NewTraceID( [16]byte{0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF, 0x80})) span.SetDroppedAttributesCount(0) span.SetDroppedEventsCount(0) span.SetStartTimestamp(testSpanStartTimestamp) span.SetEndTimestamp(testSpanEndTimestamp) - span.SetKind(pdata.SpanKindClient) + span.SetKind(ptrace.SpanKindClient) span.Events().At(0).SetTimestamp(testSpanEventTimestamp) span.Events().At(0).SetDroppedAttributesCount(0) span.Events().At(0).SetName("event-with-attr") @@ -544,7 +545,7 @@ func generateTracesOneSpanNoResource() pdata.Traces { return td } -func generateTracesWithLibraryInfo() pdata.Traces { +func generateTracesWithLibraryInfo() ptrace.Traces { td := generateTracesOneSpanNoResource() rs0 := td.ResourceSpans().At(0) rs0ils0 := rs0.ScopeSpans().At(0) @@ -553,7 +554,7 @@ func generateTracesWithLibraryInfo() pdata.Traces { return td } -func generateTracesOneSpanNoResourceWithTraceState() 
pdata.Traces { +func generateTracesOneSpanNoResourceWithTraceState() ptrace.Traces { td := generateTracesOneSpanNoResource() span := td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0) span.SetTraceState("lasterror=f39cd56cc44274fd5abd07ef1164246d10ce2955") @@ -705,19 +706,19 @@ func generateProtoSpanWithTraceState() *model.Span { } } -func generateTracesTwoSpansChildParent() pdata.Traces { +func generateTracesTwoSpansChildParent() ptrace.Traces { td := generateTracesOneSpanNoResource() spans := td.ResourceSpans().At(0).ScopeSpans().At(0).Spans() span := spans.AppendEmpty() span.SetName("operationB") - span.SetSpanID(pdata.NewSpanID([8]byte{0x1F, 0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18})) + span.SetSpanID(pcommon.NewSpanID([8]byte{0x1F, 0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18})) span.SetParentSpanID(spans.At(0).SpanID()) - span.SetKind(pdata.SpanKindServer) + span.SetKind(ptrace.SpanKindServer) span.SetTraceID(spans.At(0).TraceID()) span.SetStartTimestamp(spans.At(0).StartTimestamp()) span.SetEndTimestamp(spans.At(0).EndTimestamp()) - span.Status().SetCode(pdata.StatusCodeError) + span.Status().SetCode(ptrace.StatusCodeError) span.Attributes().InsertInt(conventions.AttributeHTTPStatusCode, 404) return td } @@ -755,18 +756,18 @@ func generateProtoChildSpan() *model.Span { } } -func generateTracesTwoSpansWithFollower() pdata.Traces { +func generateTracesTwoSpansWithFollower() ptrace.Traces { td := generateTracesOneSpanNoResource() spans := td.ResourceSpans().At(0).ScopeSpans().At(0).Spans() span := spans.AppendEmpty() span.SetName("operationC") - span.SetSpanID(pdata.NewSpanID([8]byte{0x1F, 0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18})) + span.SetSpanID(pcommon.NewSpanID([8]byte{0x1F, 0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18})) span.SetTraceID(spans.At(0).TraceID()) span.SetStartTimestamp(spans.At(0).EndTimestamp()) span.SetEndTimestamp(spans.At(0).EndTimestamp() + 1000000) - span.SetKind(pdata.SpanKindConsumer) - span.Status().SetCode(pdata.StatusCodeOk) + span.SetKind(ptrace.SpanKindConsumer) + span.Status().SetCode(ptrace.StatusCodeOk) span.Status().SetMessage("status-ok") link := span.Links().AppendEmpty() link.SetTraceID(span.TraceID()) @@ -829,7 +830,7 @@ func BenchmarkProtoBatchToInternalTraces(b *testing.B) { } } -func generateTracesTwoSpansFromTwoLibraries() pdata.Traces { +func generateTracesTwoSpansFromTwoLibraries() ptrace.Traces { td := testdata.GenerateTracesOneEmptyResourceSpans() rs0 := td.ResourceSpans().At(0) diff --git a/pkg/translator/jaeger/jaegerthrift_to_traces.go b/pkg/translator/jaeger/jaegerthrift_to_traces.go index 6cd1c6c434e3..5399e25b1b67 100644 --- a/pkg/translator/jaeger/jaegerthrift_to_traces.go +++ b/pkg/translator/jaeger/jaegerthrift_to_traces.go @@ -20,8 +20,9 @@ import ( "reflect" "github.com/jaegertracing/jaeger/thrift-gen/jaeger" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/idutils" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/tracetranslator" @@ -29,9 +30,9 @@ import ( var blankJaegerThriftSpan = new(jaeger.Span) -// ThriftToTraces transforms a Thrift trace batch into pdata.Traces. -func ThriftToTraces(batches *jaeger.Batch) (pdata.Traces, error) { - traceData := pdata.NewTraces() +// ThriftToTraces transforms a Thrift trace batch into ptrace.Traces. 
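// Editorial sketch, not part of this change set: building a ptrace.Traces value by hand
// with the new pcommon/ptrace packages. This is the shape of output that ThriftToTraces
// (below) and ProtoToTraces produce; identifiers here are illustrative only.
package main

import (
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/ptrace"
)

func buildExampleTraces() ptrace.Traces {
	td := ptrace.NewTraces()
	rs := td.ResourceSpans().AppendEmpty()
	rs.Resource().Attributes().InsertString("service.name", "example-service")

	// One span under one scope, populated through the ptrace/pcommon setters.
	span := rs.ScopeSpans().AppendEmpty().Spans().AppendEmpty()
	span.SetName("operationA")
	span.SetTraceID(pcommon.NewTraceID([16]byte{0x01}))
	span.SetSpanID(pcommon.NewSpanID([8]byte{0x01}))
	span.SetKind(ptrace.SpanKindClient)
	span.SetStartTimestamp(pcommon.NewTimestampFromTime(time.Now()))
	span.SetEndTimestamp(pcommon.NewTimestampFromTime(time.Now().Add(time.Millisecond)))
	span.Status().SetCode(ptrace.StatusCodeOk)
	return td
}

func main() {
	_ = buildExampleTraces().SpanCount() // 1
}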
+func ThriftToTraces(batches *jaeger.Batch) (ptrace.Traces, error) { + traceData := ptrace.NewTraces() jProcess := batches.GetProcess() jSpans := batches.GetSpans() @@ -51,7 +52,7 @@ func ThriftToTraces(batches *jaeger.Batch) (pdata.Traces, error) { return traceData, nil } -func jThriftProcessToInternalResource(process *jaeger.Process, dest pdata.Resource) { +func jThriftProcessToInternalResource(process *jaeger.Process, dest pcommon.Resource) { if process == nil { return } @@ -77,7 +78,7 @@ func jThriftProcessToInternalResource(process *jaeger.Process, dest pdata.Resour translateJaegerVersionAttr(attrs) } -func jThriftSpansToInternal(spans []*jaeger.Span, dest pdata.SpanSlice) { +func jThriftSpansToInternal(spans []*jaeger.Span, dest ptrace.SpanSlice) { if len(spans) == 0 { return } @@ -91,7 +92,7 @@ func jThriftSpansToInternal(spans []*jaeger.Span, dest pdata.SpanSlice) { } } -func jThriftSpanToInternal(span *jaeger.Span, dest pdata.Span) { +func jThriftSpanToInternal(span *jaeger.Span, dest ptrace.Span) { dest.SetTraceID(idutils.UInt64ToTraceID(uint64(span.TraceIdHigh), uint64(span.TraceIdLow))) dest.SetSpanID(idutils.UInt64ToSpanID(uint64(span.SpanId))) dest.SetName(span.OperationName) @@ -122,7 +123,7 @@ func jThriftSpanToInternal(span *jaeger.Span, dest pdata.Span) { } // jThriftTagsToInternalAttributes sets internal span links based on jaeger span references skipping excludeParentID -func jThriftTagsToInternalAttributes(tags []*jaeger.Tag, dest pdata.Map) { +func jThriftTagsToInternalAttributes(tags []*jaeger.Tag, dest pcommon.Map) { for _, tag := range tags { switch tag.GetVType() { case jaeger.TagType_STRING: @@ -141,7 +142,7 @@ func jThriftTagsToInternalAttributes(tags []*jaeger.Tag, dest pdata.Map) { } } -func jThriftLogsToSpanEvents(logs []*jaeger.Log, dest pdata.SpanEventSlice) { +func jThriftLogsToSpanEvents(logs []*jaeger.Log, dest ptrace.SpanEventSlice) { if len(logs) == 0 { return } @@ -167,7 +168,7 @@ func jThriftLogsToSpanEvents(logs []*jaeger.Log, dest pdata.SpanEventSlice) { } } -func jThriftReferencesToSpanLinks(refs []*jaeger.SpanRef, excludeParentID int64, dest pdata.SpanLinkSlice) { +func jThriftReferencesToSpanLinks(refs []*jaeger.SpanRef, excludeParentID int64, dest ptrace.SpanLinkSlice) { if len(refs) == 0 || len(refs) == 1 && refs[0].SpanId == excludeParentID && refs[0].RefType == jaeger.SpanRefType_CHILD_OF { return } @@ -184,7 +185,7 @@ func jThriftReferencesToSpanLinks(refs []*jaeger.SpanRef, excludeParentID int64, } } -// microsecondsToUnixNano converts epoch microseconds to pdata.Timestamp -func microsecondsToUnixNano(ms int64) pdata.Timestamp { - return pdata.Timestamp(uint64(ms) * 1000) +// microsecondsToUnixNano converts epoch microseconds to pcommon.Timestamp +func microsecondsToUnixNano(ms int64) pcommon.Timestamp { + return pcommon.Timestamp(uint64(ms) * 1000) } diff --git a/pkg/translator/jaeger/jaegerthrift_to_traces_test.go b/pkg/translator/jaeger/jaegerthrift_to_traces_test.go index af2b0c4ba727..502beae1d328 100644 --- a/pkg/translator/jaeger/jaegerthrift_to_traces_test.go +++ b/pkg/translator/jaeger/jaegerthrift_to_traces_test.go @@ -21,8 +21,9 @@ import ( "github.com/jaegertracing/jaeger/thrift-gen/jaeger" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" 
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/testdata" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/tracetranslator" @@ -61,14 +62,14 @@ func TestJThriftTagsToInternalAttributes(t *testing.T) { }, } - expected := pdata.NewMap() + expected := pcommon.NewMap() expected.InsertBool("bool-val", true) expected.InsertInt("int-val", 123) expected.InsertString("string-val", "abc") expected.InsertDouble("double-val", 1.23) expected.InsertString("binary-val", "AAAAAABkfZg=") - got := pdata.NewMap() + got := pcommon.NewMap() jThriftTagsToInternalAttributes(tags, got) require.EqualValues(t, expected, got) @@ -79,12 +80,12 @@ func TestThriftBatchToInternalTraces(t *testing.T) { tests := []struct { name string jb *jaeger.Batch - td pdata.Traces + td ptrace.Traces }{ { name: "empty", jb: &jaeger.Batch{}, - td: pdata.NewTraces(), + td: ptrace.NewTraces(), }, { @@ -283,7 +284,7 @@ func generateThriftFollowerSpan() *jaeger.Span { } } -func unixNanoToMicroseconds(ns pdata.Timestamp) int64 { +func unixNanoToMicroseconds(ns pcommon.Timestamp) int64 { return int64(ns / 1000) } diff --git a/pkg/translator/jaeger/traces_to_jaegerproto.go b/pkg/translator/jaeger/traces_to_jaegerproto.go index d53030a85ce3..b95045e80868 100644 --- a/pkg/translator/jaeger/traces_to_jaegerproto.go +++ b/pkg/translator/jaeger/traces_to_jaegerproto.go @@ -16,8 +16,9 @@ package jaeger // import "github.com/open-telemetry/opentelemetry-collector-cont import ( "github.com/jaegertracing/jaeger/model" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/idutils" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/tracetranslator" @@ -25,7 +26,7 @@ import ( // ProtoFromTraces translates internal trace data into the Jaeger Proto for GRPC. // Returns slice of translated Jaeger batches and error if translation failed. 
-func ProtoFromTraces(td pdata.Traces) ([]*model.Batch, error) { +func ProtoFromTraces(td ptrace.Traces) ([]*model.Batch, error) { resourceSpans := td.ResourceSpans() if resourceSpans.Len() == 0 { @@ -44,7 +45,7 @@ func ProtoFromTraces(td pdata.Traces) ([]*model.Batch, error) { return batches, nil } -func resourceSpansToJaegerProto(rs pdata.ResourceSpans) *model.Batch { +func resourceSpansToJaegerProto(rs ptrace.ResourceSpans) *model.Batch { resource := rs.Resource() ilss := rs.ScopeSpans() @@ -81,7 +82,7 @@ func resourceSpansToJaegerProto(rs pdata.ResourceSpans) *model.Batch { return batch } -func resourceToJaegerProtoProcess(resource pdata.Resource) *model.Process { +func resourceToJaegerProtoProcess(resource pcommon.Resource) *model.Process { process := &model.Process{} attrs := resource.Attributes() if attrs.Len() == 0 { @@ -103,12 +104,12 @@ func resourceToJaegerProtoProcess(resource pdata.Resource) *model.Process { } -func appendTagsFromResourceAttributes(dest []model.KeyValue, attrs pdata.Map) []model.KeyValue { +func appendTagsFromResourceAttributes(dest []model.KeyValue, attrs pcommon.Map) []model.KeyValue { if attrs.Len() == 0 { return dest } - attrs.Range(func(key string, attr pdata.Value) bool { + attrs.Range(func(key string, attr pcommon.Value) bool { if key == conventions.AttributeServiceName { return true } @@ -118,42 +119,42 @@ func appendTagsFromResourceAttributes(dest []model.KeyValue, attrs pdata.Map) [] return dest } -func appendTagsFromAttributes(dest []model.KeyValue, attrs pdata.Map) []model.KeyValue { +func appendTagsFromAttributes(dest []model.KeyValue, attrs pcommon.Map) []model.KeyValue { if attrs.Len() == 0 { return dest } - attrs.Range(func(key string, attr pdata.Value) bool { + attrs.Range(func(key string, attr pcommon.Value) bool { dest = append(dest, attributeToJaegerProtoTag(key, attr)) return true }) return dest } -func attributeToJaegerProtoTag(key string, attr pdata.Value) model.KeyValue { +func attributeToJaegerProtoTag(key string, attr pcommon.Value) model.KeyValue { tag := model.KeyValue{Key: key} switch attr.Type() { - case pdata.ValueTypeString: + case pcommon.ValueTypeString: // Jaeger-to-Internal maps binary tags to string attributes and encodes them as // base64 strings. Blindingly attempting to decode base64 seems too much. 
tag.VType = model.ValueType_STRING tag.VStr = attr.StringVal() - case pdata.ValueTypeInt: + case pcommon.ValueTypeInt: tag.VType = model.ValueType_INT64 tag.VInt64 = attr.IntVal() - case pdata.ValueTypeBool: + case pcommon.ValueTypeBool: tag.VType = model.ValueType_BOOL tag.VBool = attr.BoolVal() - case pdata.ValueTypeDouble: + case pcommon.ValueTypeDouble: tag.VType = model.ValueType_FLOAT64 tag.VFloat64 = attr.DoubleVal() - case pdata.ValueTypeMap, pdata.ValueTypeSlice: + case pcommon.ValueTypeMap, pcommon.ValueTypeSlice: tag.VType = model.ValueType_STRING tag.VStr = attr.AsString() } return tag } -func spanToJaegerProto(span pdata.Span, libraryTags pdata.InstrumentationScope) *model.Span { +func spanToJaegerProto(span ptrace.Span, libraryTags pcommon.InstrumentationScope) *model.Span { traceID := traceIDToJaegerProto(span.TraceID()) jReferences := makeJaegerProtoReferences(span.Links(), span.ParentSpanID(), traceID) @@ -170,7 +171,7 @@ func spanToJaegerProto(span pdata.Span, libraryTags pdata.InstrumentationScope) } } -func getJaegerProtoSpanTags(span pdata.Span, instrumentationLibrary pdata.InstrumentationScope) []model.KeyValue { +func getJaegerProtoSpanTags(span ptrace.Span, instrumentationLibrary pcommon.InstrumentationScope) []model.KeyValue { var spanKindTag, statusCodeTag, errorTag, statusMsgTag model.KeyValue var spanKindTagFound, statusCodeTagFound, errorTagFound, statusMsgTagFound bool @@ -230,7 +231,7 @@ func getJaegerProtoSpanTags(span pdata.Span, instrumentationLibrary pdata.Instru return tags } -func traceIDToJaegerProto(traceID pdata.TraceID) model.TraceID { +func traceIDToJaegerProto(traceID pcommon.TraceID) model.TraceID { traceIDHigh, traceIDLow := idutils.TraceIDToUInt64Pair(traceID) return model.TraceID{ Low: traceIDLow, @@ -238,12 +239,12 @@ func traceIDToJaegerProto(traceID pdata.TraceID) model.TraceID { } } -func spanIDToJaegerProto(spanID pdata.SpanID) model.SpanID { +func spanIDToJaegerProto(spanID pcommon.SpanID) model.SpanID { return model.SpanID(idutils.SpanIDToUInt64(spanID)) } // makeJaegerProtoReferences constructs jaeger span references based on parent span ID and span links -func makeJaegerProtoReferences(links pdata.SpanLinkSlice, parentSpanID pdata.SpanID, traceID model.TraceID) []model.SpanRef { +func makeJaegerProtoReferences(links ptrace.SpanLinkSlice, parentSpanID pcommon.SpanID, traceID model.TraceID) []model.SpanRef { parentSpanIDSet := !parentSpanID.IsEmpty() if !parentSpanIDSet && links.Len() == 0 { return nil @@ -282,7 +283,7 @@ func makeJaegerProtoReferences(links pdata.SpanLinkSlice, parentSpanID pdata.Spa return refs } -func spanEventsToJaegerProtoLogs(events pdata.SpanEventSlice) []model.Log { +func spanEventsToJaegerProtoLogs(events ptrace.SpanEventSlice) []model.Log { if events.Len() == 0 { return nil } @@ -308,18 +309,18 @@ func spanEventsToJaegerProtoLogs(events pdata.SpanEventSlice) []model.Log { return logs } -func getTagFromSpanKind(spanKind pdata.SpanKind) (model.KeyValue, bool) { +func getTagFromSpanKind(spanKind ptrace.SpanKind) (model.KeyValue, bool) { var tagStr string switch spanKind { - case pdata.SpanKindClient: + case ptrace.SpanKindClient: tagStr = string(tracetranslator.OpenTracingSpanKindClient) - case pdata.SpanKindServer: + case ptrace.SpanKindServer: tagStr = string(tracetranslator.OpenTracingSpanKindServer) - case pdata.SpanKindProducer: + case ptrace.SpanKindProducer: tagStr = string(tracetranslator.OpenTracingSpanKindProducer) - case pdata.SpanKindConsumer: + case ptrace.SpanKindConsumer: tagStr = 
string(tracetranslator.OpenTracingSpanKindConsumer) - case pdata.SpanKindInternal: + case ptrace.SpanKindInternal: tagStr = string(tracetranslator.OpenTracingSpanKindInternal) default: return model.KeyValue{}, false @@ -332,15 +333,15 @@ func getTagFromSpanKind(spanKind pdata.SpanKind) (model.KeyValue, bool) { }, true } -func getTagFromStatusCode(statusCode pdata.StatusCode) (model.KeyValue, bool) { +func getTagFromStatusCode(statusCode ptrace.StatusCode) (model.KeyValue, bool) { switch statusCode { - case pdata.StatusCodeError: + case ptrace.StatusCodeError: return model.KeyValue{ Key: conventions.OtelStatusCode, VType: model.ValueType_STRING, VStr: statusError, }, true - case pdata.StatusCodeOk: + case ptrace.StatusCodeOk: return model.KeyValue{ Key: conventions.OtelStatusCode, VType: model.ValueType_STRING, @@ -350,8 +351,8 @@ func getTagFromStatusCode(statusCode pdata.StatusCode) (model.KeyValue, bool) { return model.KeyValue{}, false } -func getErrorTagFromStatusCode(statusCode pdata.StatusCode) (model.KeyValue, bool) { - if statusCode == pdata.StatusCodeError { +func getErrorTagFromStatusCode(statusCode ptrace.StatusCode) (model.KeyValue, bool) { + if statusCode == ptrace.StatusCodeError { return model.KeyValue{ Key: tracetranslator.TagError, VBool: true, @@ -373,9 +374,9 @@ func getTagFromStatusMsg(statusMsg string) (model.KeyValue, bool) { }, true } -func getTagsFromTraceState(traceState pdata.TraceState) ([]model.KeyValue, bool) { +func getTagsFromTraceState(traceState ptrace.TraceState) ([]model.KeyValue, bool) { keyValues := make([]model.KeyValue, 0) - exists := traceState != pdata.TraceStateEmpty + exists := traceState != ptrace.TraceStateEmpty if exists { // TODO Bring this inline with solution for jaegertracing/jaeger-client-java #702 once available kv := model.KeyValue{ @@ -388,7 +389,7 @@ func getTagsFromTraceState(traceState pdata.TraceState) ([]model.KeyValue, bool) return keyValues, exists } -func getTagsFromInstrumentationLibrary(il pdata.InstrumentationScope) ([]model.KeyValue, bool) { +func getTagsFromInstrumentationLibrary(il pcommon.InstrumentationScope) ([]model.KeyValue, bool) { keyValues := make([]model.KeyValue, 0) if ilName := il.Name(); ilName != "" { kv := model.KeyValue{ diff --git a/pkg/translator/jaeger/traces_to_jaegerproto_test.go b/pkg/translator/jaeger/traces_to_jaegerproto_test.go index 65178577334c..33ccabcace50 100644 --- a/pkg/translator/jaeger/traces_to_jaegerproto_test.go +++ b/pkg/translator/jaeger/traces_to_jaegerproto_test.go @@ -20,8 +20,9 @@ import ( "github.com/jaegertracing/jaeger/model" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/goldendataset" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/tracetranslator" @@ -30,12 +31,12 @@ import ( func TestGetTagFromStatusCode(t *testing.T) { tests := []struct { name string - code pdata.StatusCode + code ptrace.StatusCode tag model.KeyValue }{ { name: "ok", - code: pdata.StatusCodeOk, + code: ptrace.StatusCodeOk, tag: model.KeyValue{ Key: conventions.OtelStatusCode, VType: model.ValueType_STRING, @@ -45,7 +46,7 @@ func TestGetTagFromStatusCode(t *testing.T) { { name: "error", - code: pdata.StatusCodeError, + code: 
ptrace.StatusCodeError, tag: model.KeyValue{ Key: conventions.OtelStatusCode, VType: model.ValueType_STRING, @@ -70,13 +71,13 @@ func TestGetErrorTagFromStatusCode(t *testing.T) { VType: model.ValueType_BOOL, } - _, ok := getErrorTagFromStatusCode(pdata.StatusCodeUnset) + _, ok := getErrorTagFromStatusCode(ptrace.StatusCodeUnset) assert.False(t, ok) - _, ok = getErrorTagFromStatusCode(pdata.StatusCodeOk) + _, ok = getErrorTagFromStatusCode(ptrace.StatusCodeOk) assert.False(t, ok) - got, ok := getErrorTagFromStatusCode(pdata.StatusCodeError) + got, ok := getErrorTagFromStatusCode(ptrace.StatusCodeError) assert.True(t, ok) assert.EqualValues(t, errTag, got) } @@ -97,20 +98,20 @@ func TestGetTagFromStatusMsg(t *testing.T) { func TestGetTagFromSpanKind(t *testing.T) { tests := []struct { name string - kind pdata.SpanKind + kind ptrace.SpanKind tag model.KeyValue ok bool }{ { name: "unspecified", - kind: pdata.SpanKindUnspecified, + kind: ptrace.SpanKindUnspecified, tag: model.KeyValue{}, ok: false, }, { name: "client", - kind: pdata.SpanKindClient, + kind: ptrace.SpanKindClient, tag: model.KeyValue{ Key: tracetranslator.TagSpanKind, VType: model.ValueType_STRING, @@ -121,7 +122,7 @@ func TestGetTagFromSpanKind(t *testing.T) { { name: "server", - kind: pdata.SpanKindServer, + kind: ptrace.SpanKindServer, tag: model.KeyValue{ Key: tracetranslator.TagSpanKind, VType: model.ValueType_STRING, @@ -132,7 +133,7 @@ func TestGetTagFromSpanKind(t *testing.T) { { name: "producer", - kind: pdata.SpanKindProducer, + kind: ptrace.SpanKindProducer, tag: model.KeyValue{ Key: tracetranslator.TagSpanKind, VType: model.ValueType_STRING, @@ -143,7 +144,7 @@ func TestGetTagFromSpanKind(t *testing.T) { { name: "consumer", - kind: pdata.SpanKindConsumer, + kind: ptrace.SpanKindConsumer, tag: model.KeyValue{ Key: tracetranslator.TagSpanKind, VType: model.ValueType_STRING, @@ -154,7 +155,7 @@ func TestGetTagFromSpanKind(t *testing.T) { { name: "internal", - kind: pdata.SpanKindInternal, + kind: ptrace.SpanKindInternal, tag: model.KeyValue{ Key: tracetranslator.TagSpanKind, VType: model.ValueType_STRING, @@ -175,7 +176,7 @@ func TestGetTagFromSpanKind(t *testing.T) { func TestAttributesToJaegerProtoTags(t *testing.T) { - attributes := pdata.NewMap() + attributes := pcommon.NewMap() attributes.InsertBool("bool-val", true) attributes.InsertInt("int-val", 123) attributes.InsertString("string-val", "abc") @@ -222,13 +223,13 @@ func TestInternalTracesToJaegerProto(t *testing.T) { tests := []struct { name string - td pdata.Traces + td ptrace.Traces jb *model.Batch err error }{ { name: "empty", - td: pdata.NewTraces(), + td: ptrace.NewTraces(), err: nil, }, diff --git a/pkg/translator/opencensus/go.mod b/pkg/translator/opencensus/go.mod index 7e86174a751a..d071e1a24fa6 100644 --- a/pkg/translator/opencensus/go.mod +++ b/pkg/translator/opencensus/go.mod @@ -9,7 +9,8 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.48.0 github.com/stretchr/testify v1.7.1 go.opencensus.io v0.23.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 google.golang.org/protobuf v1.28.0 ) @@ -31,3 +32,5 @@ require ( ) replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal => ../../../internal/coreinternal + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata 
v0.0.0-20220412005140-8eb68f40028d diff --git a/pkg/translator/opencensus/go.sum b/pkg/translator/opencensus/go.sum index dbdacb4b945a..9b9f9e9ad626 100644 --- a/pkg/translator/opencensus/go.sum +++ b/pkg/translator/opencensus/go.sum @@ -86,8 +86,10 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= diff --git a/pkg/translator/opencensus/metrics_to_oc.go b/pkg/translator/opencensus/metrics_to_oc.go index e1d8a9cec004..fa62558b20d1 100644 --- a/pkg/translator/opencensus/metrics_to_oc.go +++ b/pkg/translator/opencensus/metrics_to_oc.go @@ -21,7 +21,8 @@ import ( ocmetrics "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1" ocresource "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" "github.com/golang/protobuf/ptypes/wrappers" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" ) type labelKeysAndType struct { @@ -35,9 +36,9 @@ type labelKeysAndType struct { } // ResourceMetricsToOC may be used only by OpenCensus receiver and exporter implementations. -// Deprecated: Use pdata.Metrics. +// Deprecated: Use pmetric.Metrics. // TODO: move this function to OpenCensus package. -func ResourceMetricsToOC(rm pdata.ResourceMetrics) (*occommon.Node, *ocresource.Resource, []*ocmetrics.Metric) { +func ResourceMetricsToOC(rm pmetric.ResourceMetrics) (*occommon.Node, *ocresource.Resource, []*ocmetrics.Metric) { node, resource := internalResourceToOC(rm.Resource()) ilms := rm.ScopeMetrics() if ilms.Len() == 0 { @@ -60,7 +61,7 @@ func ResourceMetricsToOC(rm pdata.ResourceMetrics) (*occommon.Node, *ocresource. 
return node, resource, ocMetrics } -func metricToOC(metric pdata.Metric) *ocmetrics.Metric { +func metricToOC(metric pmetric.Metric) *ocmetrics.Metric { lblKeys := collectLabelKeysAndValueType(metric) return &ocmetrics.Metric{ MetricDescriptor: &ocmetrics.MetricDescriptor{ @@ -75,7 +76,7 @@ func metricToOC(metric pdata.Metric) *ocmetrics.Metric { } } -func collectLabelKeysAndValueType(metric pdata.Metric) *labelKeysAndType { +func collectLabelKeysAndValueType(metric pmetric.Metric) *labelKeysAndType { // NOTE: Internal data structure and OpenCensus have different representations of labels: // - OC has a single "global" ordered list of label keys per metric in the MetricDescriptor; // then, every data point has an ordered list of label values matching the key index. @@ -93,13 +94,13 @@ func collectLabelKeysAndValueType(metric pdata.Metric) *labelKeysAndType { keySet := make(map[string]struct{}) allNumberDataPointValueInt := false switch metric.DataType() { - case pdata.MetricDataTypeGauge: + case pmetric.MetricDataTypeGauge: allNumberDataPointValueInt = collectLabelKeysNumberDataPoints(metric.Gauge().DataPoints(), keySet) - case pdata.MetricDataTypeSum: + case pmetric.MetricDataTypeSum: allNumberDataPointValueInt = collectLabelKeysNumberDataPoints(metric.Sum().DataPoints(), keySet) - case pdata.MetricDataTypeHistogram: + case pmetric.MetricDataTypeHistogram: collectLabelKeysHistogramDataPoints(metric.Histogram().DataPoints(), keySet) - case pdata.MetricDataTypeSummary: + case pmetric.MetricDataTypeSummary: collectLabelKeysSummaryDataPoints(metric.Summary().DataPoints(), keySet) } @@ -136,53 +137,53 @@ func collectLabelKeysAndValueType(metric pdata.Metric) *labelKeysAndType { } // collectLabelKeysNumberDataPoints returns true if all values are int. -func collectLabelKeysNumberDataPoints(dps pdata.NumberDataPointSlice, keySet map[string]struct{}) bool { +func collectLabelKeysNumberDataPoints(dps pmetric.NumberDataPointSlice, keySet map[string]struct{}) bool { allInt := true for i := 0; i < dps.Len(); i++ { addLabelKeys(keySet, dps.At(i).Attributes()) - if dps.At(i).ValueType() != pdata.MetricValueTypeInt { + if dps.At(i).ValueType() != pmetric.MetricValueTypeInt { allInt = false } } return allInt } -func collectLabelKeysHistogramDataPoints(dhdp pdata.HistogramDataPointSlice, keySet map[string]struct{}) { +func collectLabelKeysHistogramDataPoints(dhdp pmetric.HistogramDataPointSlice, keySet map[string]struct{}) { for i := 0; i < dhdp.Len(); i++ { addLabelKeys(keySet, dhdp.At(i).Attributes()) } } -func collectLabelKeysSummaryDataPoints(dhdp pdata.SummaryDataPointSlice, keySet map[string]struct{}) { +func collectLabelKeysSummaryDataPoints(dhdp pmetric.SummaryDataPointSlice, keySet map[string]struct{}) { for i := 0; i < dhdp.Len(); i++ { addLabelKeys(keySet, dhdp.At(i).Attributes()) } } -func addLabelKeys(keySet map[string]struct{}, attributes pdata.Map) { - attributes.Range(func(k string, v pdata.Value) bool { +func addLabelKeys(keySet map[string]struct{}, attributes pcommon.Map) { + attributes.Range(func(k string, v pcommon.Value) bool { keySet[k] = struct{}{} return true }) } -func descriptorTypeToOC(metric pdata.Metric, allNumberDataPointValueInt bool) ocmetrics.MetricDescriptor_Type { +func descriptorTypeToOC(metric pmetric.Metric, allNumberDataPointValueInt bool) ocmetrics.MetricDescriptor_Type { switch metric.DataType() { - case pdata.MetricDataTypeGauge: + case pmetric.MetricDataTypeGauge: return gaugeType(allNumberDataPointValueInt) - case pdata.MetricDataTypeSum: + case 
pmetric.MetricDataTypeSum: sd := metric.Sum() - if sd.IsMonotonic() && sd.AggregationTemporality() == pdata.MetricAggregationTemporalityCumulative { + if sd.IsMonotonic() && sd.AggregationTemporality() == pmetric.MetricAggregationTemporalityCumulative { return cumulativeType(allNumberDataPointValueInt) } return gaugeType(allNumberDataPointValueInt) - case pdata.MetricDataTypeHistogram: + case pmetric.MetricDataTypeHistogram: hd := metric.Histogram() - if hd.AggregationTemporality() == pdata.MetricAggregationTemporalityCumulative { + if hd.AggregationTemporality() == pmetric.MetricAggregationTemporalityCumulative { return ocmetrics.MetricDescriptor_CUMULATIVE_DISTRIBUTION } return ocmetrics.MetricDescriptor_GAUGE_DISTRIBUTION - case pdata.MetricDataTypeSummary: + case pmetric.MetricDataTypeSummary: return ocmetrics.MetricDescriptor_SUMMARY } return ocmetrics.MetricDescriptor_UNSPECIFIED @@ -202,22 +203,22 @@ func cumulativeType(allNumberDataPointValueInt bool) ocmetrics.MetricDescriptor_ return ocmetrics.MetricDescriptor_CUMULATIVE_DOUBLE } -func dataPointsToTimeseries(metric pdata.Metric, labelKeys *labelKeysAndType) []*ocmetrics.TimeSeries { +func dataPointsToTimeseries(metric pmetric.Metric, labelKeys *labelKeysAndType) []*ocmetrics.TimeSeries { switch metric.DataType() { - case pdata.MetricDataTypeGauge: + case pmetric.MetricDataTypeGauge: return numberDataPointsToOC(metric.Gauge().DataPoints(), labelKeys) - case pdata.MetricDataTypeSum: + case pmetric.MetricDataTypeSum: return numberDataPointsToOC(metric.Sum().DataPoints(), labelKeys) - case pdata.MetricDataTypeHistogram: + case pmetric.MetricDataTypeHistogram: return doubleHistogramPointToOC(metric.Histogram().DataPoints(), labelKeys) - case pdata.MetricDataTypeSummary: + case pmetric.MetricDataTypeSummary: return doubleSummaryPointToOC(metric.Summary().DataPoints(), labelKeys) } return nil } -func numberDataPointsToOC(dps pdata.NumberDataPointSlice, labelKeys *labelKeysAndType) []*ocmetrics.TimeSeries { +func numberDataPointsToOC(dps pmetric.NumberDataPointSlice, labelKeys *labelKeysAndType) []*ocmetrics.TimeSeries { if dps.Len() == 0 { return nil } @@ -228,11 +229,11 @@ func numberDataPointsToOC(dps pdata.NumberDataPointSlice, labelKeys *labelKeysAn Timestamp: timestampAsTimestampPb(dp.Timestamp()), } switch dp.ValueType() { - case pdata.MetricValueTypeInt: + case pmetric.MetricValueTypeInt: point.Value = &ocmetrics.Point_Int64Value{ Int64Value: dp.IntVal(), } - case pdata.MetricValueTypeDouble: + case pmetric.MetricValueTypeDouble: point.Value = &ocmetrics.Point_DoubleValue{ DoubleValue: dp.DoubleVal(), } @@ -247,7 +248,7 @@ func numberDataPointsToOC(dps pdata.NumberDataPointSlice, labelKeys *labelKeysAn return timeseries } -func doubleHistogramPointToOC(dps pdata.HistogramDataPointSlice, labelKeys *labelKeysAndType) []*ocmetrics.TimeSeries { +func doubleHistogramPointToOC(dps pmetric.HistogramDataPointSlice, labelKeys *labelKeysAndType) []*ocmetrics.TimeSeries { if dps.Len() == 0 { return nil } @@ -308,7 +309,7 @@ func histogramBucketsToOC(bcts []uint64) []*ocmetrics.DistributionValue_Bucket { return ocBuckets } -func doubleSummaryPointToOC(dps pdata.SummaryDataPointSlice, labelKeys *labelKeysAndType) []*ocmetrics.TimeSeries { +func doubleSummaryPointToOC(dps pmetric.SummaryDataPointSlice, labelKeys *labelKeysAndType) []*ocmetrics.TimeSeries { if dps.Len() == 0 { return nil } @@ -340,7 +341,7 @@ func doubleSummaryPointToOC(dps pdata.SummaryDataPointSlice, labelKeys *labelKey return timeseries } -func summaryPercentilesToOC(qtls 
pdata.ValueAtQuantileSlice) []*ocmetrics.SummaryValue_Snapshot_ValueAtPercentile { +func summaryPercentilesToOC(qtls pmetric.ValueAtQuantileSlice) []*ocmetrics.SummaryValue_Snapshot_ValueAtPercentile { if qtls.Len() == 0 { return nil } @@ -356,7 +357,7 @@ func summaryPercentilesToOC(qtls pdata.ValueAtQuantileSlice) []*ocmetrics.Summar return ocPercentiles } -func exemplarsToOC(bounds []float64, ocBuckets []*ocmetrics.DistributionValue_Bucket, exemplars pdata.ExemplarSlice) { +func exemplarsToOC(bounds []float64, ocBuckets []*ocmetrics.DistributionValue_Bucket, exemplars pmetric.ExemplarSlice) { if exemplars.Len() == 0 { return } @@ -365,9 +366,9 @@ func exemplarsToOC(bounds []float64, ocBuckets []*ocmetrics.DistributionValue_Bu exemplar := exemplars.At(i) var val float64 switch exemplar.ValueType() { - case pdata.MetricValueTypeInt: + case pmetric.MetricValueTypeInt: val = float64(exemplar.IntVal()) - case pdata.MetricValueTypeDouble: + case pmetric.MetricValueTypeDouble: val = exemplar.DoubleVal() } pos := 0 @@ -381,11 +382,11 @@ func exemplarsToOC(bounds []float64, ocBuckets []*ocmetrics.DistributionValue_Bu } } -func exemplarToOC(filteredLabels pdata.Map, value float64, timestamp pdata.Timestamp) *ocmetrics.DistributionValue_Exemplar { +func exemplarToOC(filteredLabels pcommon.Map, value float64, timestamp pcommon.Timestamp) *ocmetrics.DistributionValue_Exemplar { var labels map[string]string if filteredLabels.Len() != 0 { labels = make(map[string]string, filteredLabels.Len()) - filteredLabels.Range(func(k string, v pdata.Value) bool { + filteredLabels.Range(func(k string, v pcommon.Value) bool { labels[k] = v.AsString() return true }) @@ -398,7 +399,7 @@ func exemplarToOC(filteredLabels pdata.Map, value float64, timestamp pdata.Times } } -func attributeValuesToOC(labels pdata.Map, labelKeys *labelKeysAndType) []*ocmetrics.LabelValue { +func attributeValuesToOC(labels pcommon.Map, labelKeys *labelKeysAndType) []*ocmetrics.LabelValue { if len(labelKeys.keys) == 0 { return nil } @@ -412,7 +413,7 @@ func attributeValuesToOC(labels pdata.Map, labelKeys *labelKeysAndType) []*ocmet } // Visit all defined labels in the point and override defaults with actual values - labels.Range(func(k string, v pdata.Value) bool { + labels.Range(func(k string, v pcommon.Value) bool { // Find the appropriate label value that we need to update keyIndex := labelKeys.keyIndices[k] labelValue := labelValues[keyIndex] diff --git a/pkg/translator/opencensus/metrics_to_oc_test.go b/pkg/translator/opencensus/metrics_to_oc_test.go index 07d143ab0f42..1972774beafc 100644 --- a/pkg/translator/opencensus/metrics_to_oc_test.go +++ b/pkg/translator/opencensus/metrics_to_oc_test.go @@ -23,8 +23,9 @@ import ( ocmetrics "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1" ocresource "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "google.golang.org/protobuf/types/known/timestamppb" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/occonventions" @@ -34,16 +35,16 @@ import ( func TestMetricsToOC(t *testing.T) { sampleMetricData := testdata.GeneratMetricsAllTypesWithSampleDatapoints() attrs := sampleMetricData.ResourceMetrics().At(0).Resource().Attributes() - 
attrs.Upsert(conventions.AttributeHostName, pdata.NewValueString("host1")) - attrs.Upsert(conventions.AttributeProcessPID, pdata.NewValueInt(123)) - attrs.Upsert(occonventions.AttributeProcessStartTime, pdata.NewValueString("2020-02-11T20:26:00Z")) - attrs.Upsert(conventions.AttributeTelemetrySDKLanguage, pdata.NewValueString("cpp")) - attrs.Upsert(conventions.AttributeTelemetrySDKVersion, pdata.NewValueString("v2.0.1")) - attrs.Upsert(occonventions.AttributeExporterVersion, pdata.NewValueString("v1.2.0")) + attrs.Upsert(conventions.AttributeHostName, pcommon.NewValueString("host1")) + attrs.Upsert(conventions.AttributeProcessPID, pcommon.NewValueInt(123)) + attrs.Upsert(occonventions.AttributeProcessStartTime, pcommon.NewValueString("2020-02-11T20:26:00Z")) + attrs.Upsert(conventions.AttributeTelemetrySDKLanguage, pcommon.NewValueString("cpp")) + attrs.Upsert(conventions.AttributeTelemetrySDKVersion, pcommon.NewValueString("v2.0.1")) + attrs.Upsert(occonventions.AttributeExporterVersion, pcommon.NewValueString("v1.2.0")) tests := []struct { name string - internal pdata.Metrics + internal pmetric.Metrics oc *agentmetricspb.ExportMetricsServiceRequest }{ { @@ -170,14 +171,14 @@ func generateOCTestData() *agentmetricspb.ExportMetricsServiceRequest { func TestMetricsType(t *testing.T) { tests := []struct { name string - internal func() pdata.Metric + internal func() pmetric.Metric descType ocmetrics.MetricDescriptor_Type }{ { name: "int-gauge", - internal: func() pdata.Metric { - m := pdata.NewMetric() - m.SetDataType(pdata.MetricDataTypeGauge) + internal: func() pmetric.Metric { + m := pmetric.NewMetric() + m.SetDataType(pmetric.MetricDataTypeGauge) m.Gauge().DataPoints().AppendEmpty().SetIntVal(1) return m }, @@ -185,9 +186,9 @@ func TestMetricsType(t *testing.T) { }, { name: "double-gauge", - internal: func() pdata.Metric { - m := pdata.NewMetric() - m.SetDataType(pdata.MetricDataTypeGauge) + internal: func() pmetric.Metric { + m := pmetric.NewMetric() + m.SetDataType(pmetric.MetricDataTypeGauge) m.Gauge().DataPoints().AppendEmpty().SetDoubleVal(1) return m }, @@ -195,11 +196,11 @@ func TestMetricsType(t *testing.T) { }, { name: "int-non-monotonic-delta-sum", - internal: func() pdata.Metric { - m := pdata.NewMetric() - m.SetDataType(pdata.MetricDataTypeSum) + internal: func() pmetric.Metric { + m := pmetric.NewMetric() + m.SetDataType(pmetric.MetricDataTypeSum) m.Sum().SetIsMonotonic(false) - m.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityDelta) + m.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityDelta) m.Sum().DataPoints().AppendEmpty().SetIntVal(1) return m }, @@ -207,11 +208,11 @@ func TestMetricsType(t *testing.T) { }, { name: "int-non-monotonic-cumulative-sum", - internal: func() pdata.Metric { - m := pdata.NewMetric() - m.SetDataType(pdata.MetricDataTypeSum) + internal: func() pmetric.Metric { + m := pmetric.NewMetric() + m.SetDataType(pmetric.MetricDataTypeSum) m.Sum().SetIsMonotonic(false) - m.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.Sum().DataPoints().AppendEmpty().SetIntVal(1) return m }, @@ -219,11 +220,11 @@ func TestMetricsType(t *testing.T) { }, { name: "int-monotonic-delta-sum", - internal: func() pdata.Metric { - m := pdata.NewMetric() - m.SetDataType(pdata.MetricDataTypeSum) + internal: func() pmetric.Metric { + m := pmetric.NewMetric() + m.SetDataType(pmetric.MetricDataTypeSum) m.Sum().SetIsMonotonic(true) - 
m.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityDelta) + m.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityDelta) m.Sum().DataPoints().AppendEmpty().SetIntVal(1) return m }, @@ -231,11 +232,11 @@ func TestMetricsType(t *testing.T) { }, { name: "int-monotonic-cumulative-sum", - internal: func() pdata.Metric { - m := pdata.NewMetric() - m.SetDataType(pdata.MetricDataTypeSum) + internal: func() pmetric.Metric { + m := pmetric.NewMetric() + m.SetDataType(pmetric.MetricDataTypeSum) m.Sum().SetIsMonotonic(true) - m.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.Sum().DataPoints().AppendEmpty().SetIntVal(1) return m }, @@ -243,11 +244,11 @@ func TestMetricsType(t *testing.T) { }, { name: "double-non-monotonic-delta-sum", - internal: func() pdata.Metric { - m := pdata.NewMetric() - m.SetDataType(pdata.MetricDataTypeSum) + internal: func() pmetric.Metric { + m := pmetric.NewMetric() + m.SetDataType(pmetric.MetricDataTypeSum) m.Sum().SetIsMonotonic(false) - m.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityDelta) + m.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityDelta) m.Sum().DataPoints().AppendEmpty().SetDoubleVal(1) return m }, @@ -255,11 +256,11 @@ func TestMetricsType(t *testing.T) { }, { name: "double-non-monotonic-cumulative-sum", - internal: func() pdata.Metric { - m := pdata.NewMetric() - m.SetDataType(pdata.MetricDataTypeSum) + internal: func() pmetric.Metric { + m := pmetric.NewMetric() + m.SetDataType(pmetric.MetricDataTypeSum) m.Sum().SetIsMonotonic(false) - m.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.Sum().DataPoints().AppendEmpty().SetDoubleVal(1) return m }, @@ -267,11 +268,11 @@ func TestMetricsType(t *testing.T) { }, { name: "double-monotonic-delta-sum", - internal: func() pdata.Metric { - m := pdata.NewMetric() - m.SetDataType(pdata.MetricDataTypeSum) + internal: func() pmetric.Metric { + m := pmetric.NewMetric() + m.SetDataType(pmetric.MetricDataTypeSum) m.Sum().SetIsMonotonic(true) - m.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityDelta) + m.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityDelta) m.Sum().DataPoints().AppendEmpty().SetDoubleVal(1) return m }, @@ -279,11 +280,11 @@ func TestMetricsType(t *testing.T) { }, { name: "double-monotonic-cumulative-sum", - internal: func() pdata.Metric { - m := pdata.NewMetric() - m.SetDataType(pdata.MetricDataTypeSum) + internal: func() pmetric.Metric { + m := pmetric.NewMetric() + m.SetDataType(pmetric.MetricDataTypeSum) m.Sum().SetIsMonotonic(true) - m.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.Sum().DataPoints().AppendEmpty().SetDoubleVal(1) return m }, diff --git a/pkg/translator/opencensus/oc_testdata_test.go b/pkg/translator/opencensus/oc_testdata_test.go index 8c50c6972922..4f7b913c50af 100644 --- a/pkg/translator/opencensus/oc_testdata_test.go +++ b/pkg/translator/opencensus/oc_testdata_test.go @@ -21,8 +21,8 @@ import ( agentmetricspb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1" ocmetrics "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1" ocresource 
"github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" "google.golang.org/protobuf/types/known/timestamppb" "google.golang.org/protobuf/types/known/wrapperspb" @@ -613,8 +613,8 @@ func generateOCTestMetricDoubleSummary() *ocmetrics.Metric { } } -func generateResourceWithOcNodeAndResource() pdata.Resource { - resource := pdata.NewResource() +func generateResourceWithOcNodeAndResource() pcommon.Resource { + resource := pcommon.NewResource() resource.Attributes().InsertString(occonventions.AttributeProcessStartTime, "2020-02-11T20:26:00Z") resource.Attributes().InsertString(conventions.AttributeHostName, "host1") resource.Attributes().InsertInt(conventions.AttributeProcessPID, 123) diff --git a/pkg/translator/opencensus/oc_to_metrics.go b/pkg/translator/opencensus/oc_to_metrics.go index 773cd2874ba3..7bec38619271 100644 --- a/pkg/translator/opencensus/oc_to_metrics.go +++ b/pkg/translator/opencensus/oc_to_metrics.go @@ -18,14 +18,15 @@ import ( occommon "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1" ocmetrics "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1" ocresource "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" ) // OCToMetrics converts OC data format to data.MetricData. -// Deprecated: use pdata.Metrics instead. OCToMetrics may be used only by OpenCensus +// Deprecated: use pmetric.Metrics instead. OCToMetrics may be used only by OpenCensus // receiver and exporter implementations. 
-func OCToMetrics(node *occommon.Node, resource *ocresource.Resource, metrics []*ocmetrics.Metric) pdata.Metrics { - dest := pdata.NewMetrics() +func OCToMetrics(node *occommon.Node, resource *ocresource.Resource, metrics []*ocmetrics.Metric) pmetric.Metrics { + dest := pmetric.NewMetrics() if node == nil && resource == nil && len(metrics) == 0 { return dest } @@ -129,22 +130,22 @@ func OCToMetrics(node *occommon.Node, resource *ocresource.Resource, metrics []* return dest } -func ocMetricToResourceMetrics(ocMetric *ocmetrics.Metric, node *occommon.Node, out pdata.ResourceMetrics) { +func ocMetricToResourceMetrics(ocMetric *ocmetrics.Metric, node *occommon.Node, out pmetric.ResourceMetrics) { ocNodeResourceToInternal(node, ocMetric.Resource, out.Resource()) ilms := out.ScopeMetrics() ocMetricToMetrics(ocMetric, ilms.AppendEmpty().Metrics().AppendEmpty()) } -func ocMetricToMetrics(ocMetric *ocmetrics.Metric, metric pdata.Metric) { +func ocMetricToMetrics(ocMetric *ocmetrics.Metric, metric pmetric.Metric) { ocDescriptor := ocMetric.GetMetricDescriptor() if ocDescriptor == nil { - pdata.NewMetric().CopyTo(metric) + pmetric.NewMetric().CopyTo(metric) return } dataType, valType := descriptorTypeToMetrics(ocDescriptor.Type, metric) - if dataType == pdata.MetricDataTypeNone { - pdata.NewMetric().CopyTo(metric) + if dataType == pmetric.MetricDataTypeNone { + pmetric.NewMetric().CopyTo(metric) return } @@ -155,54 +156,54 @@ func ocMetricToMetrics(ocMetric *ocmetrics.Metric, metric pdata.Metric) { setDataPoints(ocMetric, metric, valType) } -func descriptorTypeToMetrics(t ocmetrics.MetricDescriptor_Type, metric pdata.Metric) (pdata.MetricDataType, pdata.MetricValueType) { +func descriptorTypeToMetrics(t ocmetrics.MetricDescriptor_Type, metric pmetric.Metric) (pmetric.MetricDataType, pmetric.MetricValueType) { switch t { case ocmetrics.MetricDescriptor_GAUGE_INT64: - metric.SetDataType(pdata.MetricDataTypeGauge) - return pdata.MetricDataTypeGauge, pdata.MetricValueTypeInt + metric.SetDataType(pmetric.MetricDataTypeGauge) + return pmetric.MetricDataTypeGauge, pmetric.MetricValueTypeInt case ocmetrics.MetricDescriptor_GAUGE_DOUBLE: - metric.SetDataType(pdata.MetricDataTypeGauge) - return pdata.MetricDataTypeGauge, pdata.MetricValueTypeDouble + metric.SetDataType(pmetric.MetricDataTypeGauge) + return pmetric.MetricDataTypeGauge, pmetric.MetricValueTypeDouble case ocmetrics.MetricDescriptor_CUMULATIVE_INT64: - metric.SetDataType(pdata.MetricDataTypeSum) + metric.SetDataType(pmetric.MetricDataTypeSum) sum := metric.Sum() sum.SetIsMonotonic(true) - sum.SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) - return pdata.MetricDataTypeSum, pdata.MetricValueTypeInt + sum.SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) + return pmetric.MetricDataTypeSum, pmetric.MetricValueTypeInt case ocmetrics.MetricDescriptor_CUMULATIVE_DOUBLE: - metric.SetDataType(pdata.MetricDataTypeSum) + metric.SetDataType(pmetric.MetricDataTypeSum) sum := metric.Sum() sum.SetIsMonotonic(true) - sum.SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) - return pdata.MetricDataTypeSum, pdata.MetricValueTypeDouble + sum.SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) + return pmetric.MetricDataTypeSum, pmetric.MetricValueTypeDouble case ocmetrics.MetricDescriptor_CUMULATIVE_DISTRIBUTION: - metric.SetDataType(pdata.MetricDataTypeHistogram) + metric.SetDataType(pmetric.MetricDataTypeHistogram) histo := metric.Histogram() - 
histo.SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) - return pdata.MetricDataTypeHistogram, pdata.MetricValueTypeNone + histo.SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) + return pmetric.MetricDataTypeHistogram, pmetric.MetricValueTypeNone case ocmetrics.MetricDescriptor_SUMMARY: - metric.SetDataType(pdata.MetricDataTypeSummary) + metric.SetDataType(pmetric.MetricDataTypeSummary) // no temporality specified for summary metric - return pdata.MetricDataTypeSummary, pdata.MetricValueTypeNone + return pmetric.MetricDataTypeSummary, pmetric.MetricValueTypeNone } - return pdata.MetricDataTypeNone, pdata.MetricValueTypeNone + return pmetric.MetricDataTypeNone, pmetric.MetricValueTypeNone } // setDataPoints converts OC timeseries to internal datapoints based on metric type -func setDataPoints(ocMetric *ocmetrics.Metric, metric pdata.Metric, valType pdata.MetricValueType) { +func setDataPoints(ocMetric *ocmetrics.Metric, metric pmetric.Metric, valType pmetric.MetricValueType) { switch metric.DataType() { - case pdata.MetricDataTypeGauge: + case pmetric.MetricDataTypeGauge: fillNumberDataPoint(ocMetric, metric.Gauge().DataPoints(), valType) - case pdata.MetricDataTypeSum: + case pmetric.MetricDataTypeSum: fillNumberDataPoint(ocMetric, metric.Sum().DataPoints(), valType) - case pdata.MetricDataTypeHistogram: + case pmetric.MetricDataTypeHistogram: fillDoubleHistogramDataPoint(ocMetric, metric.Histogram().DataPoints()) - case pdata.MetricDataTypeSummary: + case pmetric.MetricDataTypeSummary: fillDoubleSummaryDataPoint(ocMetric, metric.Summary().DataPoints()) } } -func fillAttributesMap(ocLabelsKeys []*ocmetrics.LabelKey, ocLabelValues []*ocmetrics.LabelValue, attributesMap pdata.Map) { +func fillAttributesMap(ocLabelsKeys []*ocmetrics.LabelKey, ocLabelValues []*ocmetrics.LabelValue, attributesMap pcommon.Map) { if len(ocLabelsKeys) == 0 || len(ocLabelValues) == 0 { return } @@ -224,7 +225,7 @@ func fillAttributesMap(ocLabelsKeys []*ocmetrics.LabelKey, ocLabelValues []*ocme } } -func fillNumberDataPoint(ocMetric *ocmetrics.Metric, dps pdata.NumberDataPointSlice, valType pdata.MetricValueType) { +func fillNumberDataPoint(ocMetric *ocmetrics.Metric, dps pmetric.NumberDataPointSlice, valType pmetric.MetricValueType) { ocPointsCount := getPointsCount(ocMetric) dps.EnsureCapacity(ocPointsCount) ocLabelsKeys := ocMetric.GetMetricDescriptor().GetLabelKeys() @@ -232,7 +233,7 @@ func fillNumberDataPoint(ocMetric *ocmetrics.Metric, dps pdata.NumberDataPointSl if timeseries == nil { continue } - startTimestamp := pdata.NewTimestampFromTime(timeseries.GetStartTimestamp().AsTime()) + startTimestamp := pcommon.NewTimestampFromTime(timeseries.GetStartTimestamp().AsTime()) for _, point := range timeseries.GetPoints() { if point == nil { @@ -241,19 +242,19 @@ func fillNumberDataPoint(ocMetric *ocmetrics.Metric, dps pdata.NumberDataPointSl dp := dps.AppendEmpty() dp.SetStartTimestamp(startTimestamp) - dp.SetTimestamp(pdata.NewTimestampFromTime(point.GetTimestamp().AsTime())) + dp.SetTimestamp(pcommon.NewTimestampFromTime(point.GetTimestamp().AsTime())) fillAttributesMap(ocLabelsKeys, timeseries.LabelValues, dp.Attributes()) switch valType { - case pdata.MetricValueTypeInt: + case pmetric.MetricValueTypeInt: dp.SetIntVal(point.GetInt64Value()) - case pdata.MetricValueTypeDouble: + case pmetric.MetricValueTypeDouble: dp.SetDoubleVal(point.GetDoubleValue()) } } } } -func fillDoubleHistogramDataPoint(ocMetric *ocmetrics.Metric, dps pdata.HistogramDataPointSlice) { 
+func fillDoubleHistogramDataPoint(ocMetric *ocmetrics.Metric, dps pmetric.HistogramDataPointSlice) { ocPointsCount := getPointsCount(ocMetric) dps.EnsureCapacity(ocPointsCount) ocLabelsKeys := ocMetric.GetMetricDescriptor().GetLabelKeys() @@ -261,7 +262,7 @@ func fillDoubleHistogramDataPoint(ocMetric *ocmetrics.Metric, dps pdata.Histogra if timeseries == nil { continue } - startTimestamp := pdata.NewTimestampFromTime(timeseries.GetStartTimestamp().AsTime()) + startTimestamp := pcommon.NewTimestampFromTime(timeseries.GetStartTimestamp().AsTime()) for _, point := range timeseries.GetPoints() { if point == nil { @@ -270,7 +271,7 @@ func fillDoubleHistogramDataPoint(ocMetric *ocmetrics.Metric, dps pdata.Histogra dp := dps.AppendEmpty() dp.SetStartTimestamp(startTimestamp) - dp.SetTimestamp(pdata.NewTimestampFromTime(point.GetTimestamp().AsTime())) + dp.SetTimestamp(pcommon.NewTimestampFromTime(point.GetTimestamp().AsTime())) fillAttributesMap(ocLabelsKeys, timeseries.LabelValues, dp.Attributes()) distributionValue := point.GetDistributionValue() dp.SetSum(distributionValue.GetSum()) @@ -281,7 +282,7 @@ func fillDoubleHistogramDataPoint(ocMetric *ocmetrics.Metric, dps pdata.Histogra } } -func fillDoubleSummaryDataPoint(ocMetric *ocmetrics.Metric, dps pdata.SummaryDataPointSlice) { +func fillDoubleSummaryDataPoint(ocMetric *ocmetrics.Metric, dps pmetric.SummaryDataPointSlice) { ocPointsCount := getPointsCount(ocMetric) dps.EnsureCapacity(ocPointsCount) ocLabelsKeys := ocMetric.GetMetricDescriptor().GetLabelKeys() @@ -289,7 +290,7 @@ func fillDoubleSummaryDataPoint(ocMetric *ocmetrics.Metric, dps pdata.SummaryDat if timeseries == nil { continue } - startTimestamp := pdata.NewTimestampFromTime(timeseries.GetStartTimestamp().AsTime()) + startTimestamp := pcommon.NewTimestampFromTime(timeseries.GetStartTimestamp().AsTime()) for _, point := range timeseries.GetPoints() { if point == nil { @@ -298,7 +299,7 @@ func fillDoubleSummaryDataPoint(ocMetric *ocmetrics.Metric, dps pdata.SummaryDat dp := dps.AppendEmpty() dp.SetStartTimestamp(startTimestamp) - dp.SetTimestamp(pdata.NewTimestampFromTime(point.GetTimestamp().AsTime())) + dp.SetTimestamp(pcommon.NewTimestampFromTime(point.GetTimestamp().AsTime())) fillAttributesMap(ocLabelsKeys, timeseries.LabelValues, dp.Attributes()) summaryValue := point.GetSummaryValue() dp.SetSum(summaryValue.GetSum().GetValue()) @@ -308,7 +309,7 @@ func fillDoubleSummaryDataPoint(ocMetric *ocmetrics.Metric, dps pdata.SummaryDat } } -func ocHistogramBucketsToMetrics(ocBuckets []*ocmetrics.DistributionValue_Bucket, dp pdata.HistogramDataPoint) { +func ocHistogramBucketsToMetrics(ocBuckets []*ocmetrics.DistributionValue_Bucket, dp pmetric.HistogramDataPoint) { if len(ocBuckets) == 0 { return } @@ -323,12 +324,12 @@ func ocHistogramBucketsToMetrics(ocBuckets []*ocmetrics.DistributionValue_Bucket dp.SetBucketCounts(buckets) } -func ocSummaryPercentilesToMetrics(ocPercentiles []*ocmetrics.SummaryValue_Snapshot_ValueAtPercentile, dp pdata.SummaryDataPoint) { +func ocSummaryPercentilesToMetrics(ocPercentiles []*ocmetrics.SummaryValue_Snapshot_ValueAtPercentile, dp pmetric.SummaryDataPoint) { if len(ocPercentiles) == 0 { return } - quantiles := pdata.NewValueAtQuantileSlice() + quantiles := pmetric.NewValueAtQuantileSlice() quantiles.EnsureCapacity(len(ocPercentiles)) for _, percentile := range ocPercentiles { @@ -340,9 +341,9 @@ func ocSummaryPercentilesToMetrics(ocPercentiles []*ocmetrics.SummaryValue_Snaps quantiles.CopyTo(dp.QuantileValues()) } -func exemplarToMetrics(ocExemplar 
*ocmetrics.DistributionValue_Exemplar, exemplar pdata.Exemplar) { +func exemplarToMetrics(ocExemplar *ocmetrics.DistributionValue_Exemplar, exemplar pmetric.Exemplar) { if ocExemplar.GetTimestamp() != nil { - exemplar.SetTimestamp(pdata.NewTimestampFromTime(ocExemplar.GetTimestamp().AsTime())) + exemplar.SetTimestamp(pcommon.NewTimestampFromTime(ocExemplar.GetTimestamp().AsTime())) } ocAttachments := ocExemplar.GetAttachments() exemplar.SetDoubleVal(ocExemplar.GetValue()) diff --git a/pkg/translator/opencensus/oc_to_metrics_test.go b/pkg/translator/opencensus/oc_to_metrics_test.go index b3c5a0c477a7..09a5d78b9454 100644 --- a/pkg/translator/opencensus/oc_to_metrics_test.go +++ b/pkg/translator/opencensus/oc_to_metrics_test.go @@ -22,7 +22,7 @@ import ( ocmetrics "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1" ocresource "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/testdata" ) @@ -31,12 +31,12 @@ func TestOCToMetrics(t *testing.T) { tests := []struct { name string oc *agentmetricspb.ExportMetricsServiceRequest - internal pdata.Metrics + internal pmetric.Metrics }{ { name: "empty", oc: &agentmetricspb.ExportMetricsServiceRequest{}, - internal: pdata.NewMetrics(), + internal: pmetric.NewMetrics(), }, { @@ -139,7 +139,7 @@ func TestOCToMetrics(t *testing.T) { func TestOCToMetrics_ResourceInMetric(t *testing.T) { internal := testdata.GenerateMetricsOneMetric() - want := pdata.NewMetrics() + want := pmetric.NewMetrics() internal.Clone().ResourceMetrics().MoveAndAppendTo(want.ResourceMetrics()) internal.Clone().ResourceMetrics().MoveAndAppendTo(want.ResourceMetrics()) want.ResourceMetrics().At(1).Resource().Attributes().UpsertString("resource-attr", "another-value") @@ -154,7 +154,7 @@ func TestOCToMetrics_ResourceInMetric(t *testing.T) { func TestOCToMetrics_ResourceInMetricOnly(t *testing.T) { internal := testdata.GenerateMetricsOneMetric() - want := pdata.NewMetrics() + want := pmetric.NewMetrics() internal.Clone().ResourceMetrics().MoveAndAppendTo(want.ResourceMetrics()) oc := generateOCTestDataMetricsOneMetric() // Move resource to metric level. 
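[Reviewer note, not part of the patch] The metric-side hunks above all apply the same substitution: metric-specific types move from the monolithic pdata package to pmetric, while shared primitives (maps, values, timestamps, resources) move to pcommon. Below is a minimal, self-contained sketch of the post-migration API, restricted to calls that already appear in the hunks above; the attribute key and the data-point values are illustrative only.

package main

import (
	"fmt"
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	// Build a cumulative, monotonic sum the same way the updated tests do.
	m := pmetric.NewMetric()
	m.SetDataType(pmetric.MetricDataTypeSum)
	sum := m.Sum()
	sum.SetIsMonotonic(true)
	sum.SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)

	// Data points keep the same accessors; only the package for shared types changes.
	dp := sum.DataPoints().AppendEmpty()
	dp.SetStartTimestamp(pcommon.NewTimestampFromTime(time.Now().Add(-time.Minute)))
	dp.SetTimestamp(pcommon.NewTimestampFromTime(time.Now()))
	dp.SetIntVal(42)
	dp.Attributes().UpsertString("host.name", "host1") // dp.Attributes() is a pcommon.Map

	fmt.Println(m.DataType(), dp.IntVal())
}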
diff --git a/pkg/translator/opencensus/oc_to_resource.go b/pkg/translator/opencensus/oc_to_resource.go index c4f3872a2e34..80ee854e1be9 100644 --- a/pkg/translator/opencensus/oc_to_resource.go +++ b/pkg/translator/opencensus/oc_to_resource.go @@ -20,8 +20,8 @@ import ( occommon "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1" ocresource "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" "go.opencensus.io/resource/resourcekeys" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/occonventions" ) @@ -43,7 +43,7 @@ func getOCLangCodeToLangMap() map[occommon.LibraryInfo_Language]string { return mappings } -func ocNodeResourceToInternal(ocNode *occommon.Node, ocResource *ocresource.Resource, dest pdata.Resource) { +func ocNodeResourceToInternal(ocNode *occommon.Node, ocResource *ocresource.Resource, dest pcommon.Resource) { if ocNode == nil && ocResource == nil { return } diff --git a/pkg/translator/opencensus/oc_to_resource_test.go b/pkg/translator/opencensus/oc_to_resource_test.go index cd5497f4f791..9451ff201285 100644 --- a/pkg/translator/opencensus/oc_to_resource_test.go +++ b/pkg/translator/opencensus/oc_to_resource_test.go @@ -22,14 +22,14 @@ import ( agenttracepb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1" ocresource "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" "google.golang.org/protobuf/proto" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/occonventions" ) func TestOcNodeResourceToInternal(t *testing.T) { - resource := pdata.NewResource() + resource := pcommon.NewResource() ocNodeResourceToInternal(nil, nil, resource) assert.Equal(t, 0, resource.Attributes().Len()) @@ -42,13 +42,13 @@ func TestOcNodeResourceToInternal(t *testing.T) { ocResource = generateOcResource() expectedAttrs := generateResourceWithOcNodeAndResource().Attributes() // We don't have type information in ocResource, so need to make int attr string - expectedAttrs.Upsert("resource-int-attr", pdata.NewValueString("123")) + expectedAttrs.Upsert("resource-int-attr", pcommon.NewValueString("123")) ocNodeResourceToInternal(ocNode, ocResource, resource) assert.EqualValues(t, expectedAttrs.Sort(), resource.Attributes().Sort()) // Make sure hard-coded fields override same-name values in Attributes. // To do that add Attributes with same-name. - expectedAttrs.Range(func(k string, v pdata.Value) bool { + expectedAttrs.Range(func(k string, v pcommon.Value) bool { // Set all except "attr1" which is not a hard-coded field to some bogus values. if !strings.Contains(k, "-attr") { ocNode.Attributes[k] = "this will be overridden 1" @@ -58,7 +58,7 @@ func TestOcNodeResourceToInternal(t *testing.T) { ocResource.Labels[occonventions.AttributeResourceType] = "this will be overridden 2" // Convert again. - resource = pdata.NewResource() + resource = pcommon.NewResource() ocNodeResourceToInternal(ocNode, ocResource, resource) // And verify that same-name attributes were ignored. 
assert.EqualValues(t, expectedAttrs.Sort(), resource.Attributes().Sort()) @@ -70,7 +70,7 @@ func BenchmarkOcNodeResourceToInternal(b *testing.B) { b.ResetTimer() for n := 0; n < b.N; n++ { - resource := pdata.NewResource() + resource := pcommon.NewResource() ocNodeResourceToInternal(ocNode, ocResource, resource) if ocNode.Identifier.Pid != 123 { b.Fail() diff --git a/pkg/translator/opencensus/oc_to_traces.go b/pkg/translator/opencensus/oc_to_traces.go index a781b3288d51..2f06a79a4c64 100644 --- a/pkg/translator/opencensus/oc_to_traces.go +++ b/pkg/translator/opencensus/oc_to_traces.go @@ -21,8 +21,9 @@ import ( ocresource "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" octrace "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1" "go.opencensus.io/trace" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "google.golang.org/protobuf/types/known/wrapperspb" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/occonventions" @@ -30,10 +31,10 @@ import ( ) // OCToTraces may be used only by OpenCensus receiver and exporter implementations. -// Deprecated: use pdata.Traces instead. +// Deprecated: use ptrace.Traces instead. // TODO: move this function to OpenCensus package. -func OCToTraces(node *occommon.Node, resource *ocresource.Resource, spans []*octrace.Span) pdata.Traces { - traceData := pdata.NewTraces() +func OCToTraces(node *occommon.Node, resource *ocresource.Resource, spans []*octrace.Span) ptrace.Traces { + traceData := ptrace.NewTraces() if node == nil && resource == nil && len(spans) == 0 { return traceData } @@ -114,13 +115,13 @@ func OCToTraces(node *occommon.Node, resource *ocresource.Resource, spans []*oct return traceData } -func ocSpanToResourceSpans(ocSpan *octrace.Span, node *occommon.Node, dest pdata.ResourceSpans) { +func ocSpanToResourceSpans(ocSpan *octrace.Span, node *occommon.Node, dest ptrace.ResourceSpans) { ocNodeResourceToInternal(node, ocSpan.Resource, dest.Resource()) ilss := dest.ScopeSpans() ocSpanToInternal(ocSpan, ilss.AppendEmpty().Spans().AppendEmpty()) } -func ocSpanToInternal(src *octrace.Span, dest pdata.Span) { +func ocSpanToInternal(src *octrace.Span, dest ptrace.Span) { // Note that ocSpanKindToInternal must be called before initAttributeMapFromOC // since it may modify src.Attributes (remove the attribute which represents the // span kind). @@ -132,8 +133,8 @@ func ocSpanToInternal(src *octrace.Span, dest pdata.Span) { dest.SetParentSpanID(spanIDToInternal(src.ParentSpanId)) dest.SetName(src.Name.GetValue()) - dest.SetStartTimestamp(pdata.NewTimestampFromTime(src.StartTime.AsTime())) - dest.SetEndTimestamp(pdata.NewTimestampFromTime(src.EndTime.AsTime())) + dest.SetStartTimestamp(pcommon.NewTimestampFromTime(src.StartTime.AsTime())) + dest.SetEndTimestamp(pcommon.NewTimestampFromTime(src.EndTime.AsTime())) ocStatusToInternal(src.Status, src.Attributes, dest.Status()) @@ -144,41 +145,41 @@ func ocSpanToInternal(src *octrace.Span, dest pdata.Span) { ocSameProcessAsParentSpanToInternal(src.SameProcessAsParentSpan, dest) } -// Transforms the byte slice trace ID into a [16]byte internal pdata.TraceID. +// Transforms the byte slice trace ID into a [16]byte internal pcommon.TraceID. // If larger input then it is truncated to 16 bytes. 
-func traceIDToInternal(traceID []byte) pdata.TraceID { +func traceIDToInternal(traceID []byte) pcommon.TraceID { tid := [16]byte{} copy(tid[:], traceID) - return pdata.NewTraceID(tid) + return pcommon.NewTraceID(tid) } -// Transforms the byte slice span ID into a [8]byte internal pdata.SpanID. +// Transforms the byte slice span ID into a [8]byte internal pcommon.SpanID. // If larger input then it is truncated to 8 bytes. -func spanIDToInternal(spanID []byte) pdata.SpanID { +func spanIDToInternal(spanID []byte) pcommon.SpanID { sid := [8]byte{} copy(sid[:], spanID) - return pdata.NewSpanID(sid) + return pcommon.NewSpanID(sid) } -func ocStatusToInternal(ocStatus *octrace.Status, ocAttrs *octrace.Span_Attributes, dest pdata.SpanStatus) { +func ocStatusToInternal(ocStatus *octrace.Status, ocAttrs *octrace.Span_Attributes, dest ptrace.SpanStatus) { if ocStatus == nil { return } - var code pdata.StatusCode + var code ptrace.StatusCode switch ocStatus.Code { case trace.StatusCodeOK: - code = pdata.StatusCodeUnset + code = ptrace.StatusCodeUnset default: // all other OC status codes are errors. - code = pdata.StatusCodeError + code = ptrace.StatusCodeError } if ocAttrs != nil { // If conventions.OtelStatusCode is set, it must override the status code value. // See the reverse translation in traces_to_oc.go:statusToOC(). if attr, ok := ocAttrs.AttributeMap[conventions.OtelStatusCode]; ok { - code = pdata.StatusCode(attr.GetIntValue()) + code = ptrace.StatusCode(attr.GetIntValue()) delete(ocAttrs.AttributeMap, conventions.OtelStatusCode) } } @@ -188,7 +189,7 @@ func ocStatusToInternal(ocStatus *octrace.Status, ocAttrs *octrace.Span_Attribut } // Convert tracestate to W3C format. See the https://w3c.github.io/trace-context/ -func ocTraceStateToInternal(ocTracestate *octrace.Span_Tracestate) pdata.TraceState { +func ocTraceStateToInternal(ocTracestate *octrace.Span_Tracestate) ptrace.TraceState { if ocTracestate == nil { return "" } @@ -202,7 +203,7 @@ func ocTraceStateToInternal(ocTracestate *octrace.Span_Tracestate) pdata.TraceSt sb.WriteString("=") sb.WriteString(entry.Value) } - return pdata.TraceState(sb.String()) + return ptrace.TraceState(sb.String()) } func ocAttrsToDroppedAttributes(ocAttrs *octrace.Span_Attributes) uint32 { @@ -213,7 +214,7 @@ func ocAttrsToDroppedAttributes(ocAttrs *octrace.Span_Attributes) uint32 { } // initAttributeMapFromOC initialize AttributeMap from OC attributes -func initAttributeMapFromOC(ocAttrs *octrace.Span_Attributes, dest pdata.Map) { +func initAttributeMapFromOC(ocAttrs *octrace.Span_Attributes, dest pcommon.Map) { if ocAttrs == nil || len(ocAttrs.AttributeMap) == 0 { return } @@ -236,13 +237,13 @@ func initAttributeMapFromOC(ocAttrs *octrace.Span_Attributes, dest pdata.Map) { } } -func ocSpanKindToInternal(ocKind octrace.Span_SpanKind, ocAttrs *octrace.Span_Attributes) pdata.SpanKind { +func ocSpanKindToInternal(ocKind octrace.Span_SpanKind, ocAttrs *octrace.Span_Attributes) ptrace.SpanKind { switch ocKind { case octrace.Span_SERVER: - return pdata.SpanKindServer + return ptrace.SpanKindServer case octrace.Span_CLIENT: - return pdata.SpanKindClient + return ptrace.SpanKindClient case octrace.Span_SPAN_KIND_UNSPECIFIED: // Span kind field is unspecified, check if TagSpanKind attribute is set. 
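[Reviewer note, not part of the patch] The trace side follows the same split: span structures come from ptrace, while IDs, timestamps, and attribute maps come from pcommon. A short sketch assuming the v0.48-era pdata API exercised in oc_to_traces.go above; the span name, span ID bytes, and attribute key are made-up values.

package main

import (
	"fmt"
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/ptrace"
)

func main() {
	span := ptrace.NewSpan()
	span.SetName("example-span")
	span.SetParentSpanID(pcommon.NewSpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8}))
	span.SetStartTimestamp(pcommon.NewTimestampFromTime(time.Now()))
	span.SetEndTimestamp(pcommon.NewTimestampFromTime(time.Now().Add(time.Millisecond)))
	span.Attributes().UpsertBool("same.process.as.parent.span", false)

	// Status codes now live in ptrace as well.
	span.Status().SetCode(ptrace.StatusCodeError)

	fmt.Println(span.Name(), span.Status().Code())
}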
@@ -253,30 +254,30 @@ func ocSpanKindToInternal(ocKind octrace.Span_SpanKind, ocAttrs *octrace.Span_At if kindAttr != nil { strVal, ok := kindAttr.Value.(*octrace.AttributeValue_StringValue) if ok && strVal != nil { - var otlpKind pdata.SpanKind + var otlpKind ptrace.SpanKind switch tracetranslator.OpenTracingSpanKind(strVal.StringValue.GetValue()) { case tracetranslator.OpenTracingSpanKindConsumer: - otlpKind = pdata.SpanKindConsumer + otlpKind = ptrace.SpanKindConsumer case tracetranslator.OpenTracingSpanKindProducer: - otlpKind = pdata.SpanKindProducer + otlpKind = ptrace.SpanKindProducer case tracetranslator.OpenTracingSpanKindInternal: - otlpKind = pdata.SpanKindInternal + otlpKind = ptrace.SpanKindInternal default: - return pdata.SpanKindUnspecified + return ptrace.SpanKindUnspecified } delete(ocAttrs.AttributeMap, tracetranslator.TagSpanKind) return otlpKind } } } - return pdata.SpanKindUnspecified + return ptrace.SpanKindUnspecified default: - return pdata.SpanKindUnspecified + return ptrace.SpanKindUnspecified } } -func ocEventsToInternal(ocEvents *octrace.Span_TimeEvents, dest pdata.Span) { +func ocEventsToInternal(ocEvents *octrace.Span_TimeEvents, dest ptrace.Span) { if ocEvents == nil { return } @@ -297,7 +298,7 @@ func ocEventsToInternal(ocEvents *octrace.Span_TimeEvents, dest pdata.Span) { } event := events.AppendEmpty() - event.SetTimestamp(pdata.NewTimestampFromTime(ocEvent.Time.AsTime())) + event.SetTimestamp(pcommon.NewTimestampFromTime(ocEvent.Time.AsTime())) switch teValue := ocEvent.Value.(type) { case *octrace.Span_TimeEvent_Annotation_: @@ -319,7 +320,7 @@ func ocEventsToInternal(ocEvents *octrace.Span_TimeEvents, dest pdata.Span) { } } -func ocLinksToInternal(ocLinks *octrace.Span_Links, dest pdata.Span) { +func ocLinksToInternal(ocLinks *octrace.Span_Links, dest ptrace.Span) { if ocLinks == nil { return } @@ -347,7 +348,7 @@ func ocLinksToInternal(ocLinks *octrace.Span_Links, dest pdata.Span) { } } -func ocMessageEventToInternalAttrs(msgEvent *octrace.Span_TimeEvent_MessageEvent, dest pdata.Map) { +func ocMessageEventToInternalAttrs(msgEvent *octrace.Span_TimeEvent_MessageEvent, dest pcommon.Map) { if msgEvent == nil { return } @@ -358,7 +359,7 @@ func ocMessageEventToInternalAttrs(msgEvent *octrace.Span_TimeEvent_MessageEvent dest.UpsertInt(conventions.AttributeMessagingMessagePayloadCompressedSizeBytes, int64(msgEvent.CompressedSize)) } -func ocSameProcessAsParentSpanToInternal(spaps *wrapperspb.BoolValue, dest pdata.Span) { +func ocSameProcessAsParentSpanToInternal(spaps *wrapperspb.BoolValue, dest ptrace.Span) { if spaps == nil { return } diff --git a/pkg/translator/opencensus/oc_to_traces_test.go b/pkg/translator/opencensus/oc_to_traces_test.go index 7978f23de067..b4ac0f94d906 100644 --- a/pkg/translator/opencensus/oc_to_traces_test.go +++ b/pkg/translator/opencensus/oc_to_traces_test.go @@ -22,7 +22,8 @@ import ( ocresource "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" octrace "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/types/known/timestamppb" "google.golang.org/protobuf/types/known/wrapperspb" @@ -53,32 +54,32 @@ func TestOcTraceStateToInternal(t *testing.T) { } func TestInitAttributeMapFromOC(t *testing.T) { - attrs := pdata.NewMap() + attrs := 
pcommon.NewMap() initAttributeMapFromOC(nil, attrs) - assert.EqualValues(t, pdata.NewMap(), attrs) + assert.EqualValues(t, pcommon.NewMap(), attrs) assert.EqualValues(t, 0, ocAttrsToDroppedAttributes(nil)) ocAttrs := &octrace.Span_Attributes{} - attrs = pdata.NewMap() + attrs = pcommon.NewMap() initAttributeMapFromOC(ocAttrs, attrs) - assert.EqualValues(t, pdata.NewMap(), attrs) + assert.EqualValues(t, pcommon.NewMap(), attrs) assert.EqualValues(t, 0, ocAttrsToDroppedAttributes(ocAttrs)) ocAttrs = &octrace.Span_Attributes{ DroppedAttributesCount: 123, } - attrs = pdata.NewMap() + attrs = pcommon.NewMap() initAttributeMapFromOC(ocAttrs, attrs) - assert.EqualValues(t, pdata.NewMap(), attrs) + assert.EqualValues(t, pcommon.NewMap(), attrs) assert.EqualValues(t, 123, ocAttrsToDroppedAttributes(ocAttrs)) ocAttrs = &octrace.Span_Attributes{ AttributeMap: map[string]*octrace.AttributeValue{}, DroppedAttributesCount: 234, } - attrs = pdata.NewMap() + attrs = pcommon.NewMap() initAttributeMapFromOC(ocAttrs, attrs) - assert.EqualValues(t, pdata.NewMap(), attrs) + assert.EqualValues(t, pcommon.NewMap(), attrs) assert.EqualValues(t, 234, ocAttrsToDroppedAttributes(ocAttrs)) ocAttrs = &octrace.Span_Attributes{ @@ -89,10 +90,10 @@ func TestInitAttributeMapFromOC(t *testing.T) { }, DroppedAttributesCount: 234, } - attrs = pdata.NewMap() + attrs = pcommon.NewMap() initAttributeMapFromOC(ocAttrs, attrs) assert.EqualValues(t, - pdata.NewMapFromRaw( + pcommon.NewMapFromRaw( map[string]interface{}{ "abc": "def", }), @@ -108,10 +109,10 @@ func TestInitAttributeMapFromOC(t *testing.T) { ocAttrs.AttributeMap["doubleval"] = &octrace.AttributeValue{ Value: &octrace.AttributeValue_DoubleValue{DoubleValue: 4.5}, } - attrs = pdata.NewMap() + attrs = pcommon.NewMap() initAttributeMapFromOC(ocAttrs, attrs) - expectedAttr := pdata.NewMapFromRaw(map[string]interface{}{ + expectedAttr := pcommon.NewMapFromRaw(map[string]interface{}{ "abc": "def", "intval": 345, "boolval": true, @@ -125,19 +126,19 @@ func TestOcSpanKindToInternal(t *testing.T) { tests := []struct { ocAttrs *octrace.Span_Attributes ocKind octrace.Span_SpanKind - otlpKind pdata.SpanKind + otlpKind ptrace.SpanKind }{ { ocKind: octrace.Span_CLIENT, - otlpKind: pdata.SpanKindClient, + otlpKind: ptrace.SpanKindClient, }, { ocKind: octrace.Span_SERVER, - otlpKind: pdata.SpanKindServer, + otlpKind: ptrace.SpanKindServer, }, { ocKind: octrace.Span_SPAN_KIND_UNSPECIFIED, - otlpKind: pdata.SpanKindUnspecified, + otlpKind: ptrace.SpanKindUnspecified, }, { ocKind: octrace.Span_SPAN_KIND_UNSPECIFIED, @@ -147,7 +148,7 @@ func TestOcSpanKindToInternal(t *testing.T) { StringValue: &octrace.TruncatableString{Value: "consumer"}}}, }, }, - otlpKind: pdata.SpanKindConsumer, + otlpKind: ptrace.SpanKindConsumer, }, { ocKind: octrace.Span_SPAN_KIND_UNSPECIFIED, @@ -157,7 +158,7 @@ func TestOcSpanKindToInternal(t *testing.T) { StringValue: &octrace.TruncatableString{Value: "producer"}}}, }, }, - otlpKind: pdata.SpanKindProducer, + otlpKind: ptrace.SpanKindProducer, }, { ocKind: octrace.Span_SPAN_KIND_UNSPECIFIED, @@ -167,7 +168,7 @@ func TestOcSpanKindToInternal(t *testing.T) { IntValue: 123}}, }, }, - otlpKind: pdata.SpanKindUnspecified, + otlpKind: ptrace.SpanKindUnspecified, }, { ocKind: octrace.Span_CLIENT, @@ -177,7 +178,7 @@ func TestOcSpanKindToInternal(t *testing.T) { StringValue: &octrace.TruncatableString{Value: "consumer"}}}, }, }, - otlpKind: pdata.SpanKindClient, + otlpKind: ptrace.SpanKindClient, }, { ocKind: octrace.Span_SPAN_KIND_UNSPECIFIED, @@ -187,7 +188,7 @@ 
func TestOcSpanKindToInternal(t *testing.T) { StringValue: &octrace.TruncatableString{Value: "internal"}}}, }, }, - otlpKind: pdata.SpanKindInternal, + otlpKind: ptrace.SpanKindInternal, }, } @@ -303,14 +304,14 @@ func TestOcToInternal(t *testing.T) { tests := []struct { name string - td pdata.Traces + td ptrace.Traces node *occommon.Node resource *ocresource.Resource spans []*octrace.Span }{ { name: "empty", - td: pdata.NewTraces(), + td: ptrace.NewTraces(), }, { @@ -390,7 +391,7 @@ func TestOcToInternal(t *testing.T) { } func TestOcSameProcessAsParentSpanToInternal(t *testing.T) { - span := pdata.NewSpan() + span := ptrace.NewSpan() ocSameProcessAsParentSpanToInternal(nil, span) assert.Equal(t, 0, span.Attributes().Len()) @@ -398,14 +399,14 @@ func TestOcSameProcessAsParentSpanToInternal(t *testing.T) { assert.Equal(t, 1, span.Attributes().Len()) v, ok := span.Attributes().Get(occonventions.AttributeSameProcessAsParentSpan) assert.True(t, ok) - assert.EqualValues(t, pdata.ValueTypeBool, v.Type()) + assert.EqualValues(t, pcommon.ValueTypeBool, v.Type()) assert.False(t, v.BoolVal()) ocSameProcessAsParentSpanToInternal(wrapperspb.Bool(true), span) assert.Equal(t, 1, span.Attributes().Len()) v, ok = span.Attributes().Get(occonventions.AttributeSameProcessAsParentSpan) assert.True(t, ok) - assert.EqualValues(t, pdata.ValueTypeBool, v.Type()) + assert.EqualValues(t, pcommon.ValueTypeBool, v.Type()) assert.True(t, v.BoolVal()) } diff --git a/pkg/translator/opencensus/resource_to_oc.go b/pkg/translator/opencensus/resource_to_oc.go index 0d2636cde108..6567a8f5a2c3 100644 --- a/pkg/translator/opencensus/resource_to_oc.go +++ b/pkg/translator/opencensus/resource_to_oc.go @@ -21,8 +21,8 @@ import ( occommon "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1" ocresource "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" "go.opencensus.io/resource/resourcekeys" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" "google.golang.org/protobuf/types/known/timestamppb" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/occonventions" @@ -78,7 +78,7 @@ func getSDKLangToOCLangCodeMap() map[string]int32 { return mappings } -func internalResourceToOC(resource pdata.Resource) (*occommon.Node, *ocresource.Resource) { +func internalResourceToOC(resource pcommon.Resource) (*occommon.Node, *ocresource.Resource) { attrs := resource.Attributes() if attrs.Len() == 0 { return nil, nil @@ -87,7 +87,7 @@ func internalResourceToOC(resource pdata.Resource) (*occommon.Node, *ocresource. 
ocNode := &occommon.Node{} ocResource := &ocresource.Resource{} labels := make(map[string]string, attrs.Len()) - attrs.Range(func(k string, v pdata.Value) bool { + attrs.Range(func(k string, v pcommon.Value) bool { val := v.AsString() switch k { diff --git a/pkg/translator/opencensus/resource_to_oc_test.go b/pkg/translator/opencensus/resource_to_oc_test.go index 1d5e2748630e..67b2ac0e0376 100644 --- a/pkg/translator/opencensus/resource_to_oc_test.go +++ b/pkg/translator/opencensus/resource_to_oc_test.go @@ -24,8 +24,9 @@ import ( "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/assert" "go.opencensus.io/resource/resourcekeys" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/testing/protocmp" @@ -34,7 +35,7 @@ import ( ) func TestResourceToOC(t *testing.T) { - emptyResource := pdata.NewResource() + emptyResource := pcommon.NewResource() ocNode := generateOcNode() ocResource := generateOcResource() @@ -45,13 +46,13 @@ func TestResourceToOC(t *testing.T) { tests := []struct { name string - resource pdata.Resource + resource pcommon.Resource ocNode *occommon.Node ocResource *ocresource.Resource }{ { name: "nil", - resource: pdata.NewResource(), + resource: pcommon.NewResource(), ocNode: nil, ocResource: nil, }, @@ -81,7 +82,7 @@ func TestResourceToOC(t *testing.T) { } func TestContainerResourceToOC(t *testing.T) { - resource := pdata.NewResource() + resource := pcommon.NewResource() resource.Attributes().InsertString(conventions.AttributeK8SClusterName, "cluster1") resource.Attributes().InsertString(conventions.AttributeK8SPodName, "pod1") resource.Attributes().InsertString(conventions.AttributeK8SNamespaceName, "namespace1") @@ -200,27 +201,27 @@ func TestResourceToOCAndBack(t *testing.T) { } for _, test := range tests { t.Run(string(test), func(t *testing.T) { - traces := pdata.NewTraces() + traces := ptrace.NewTraces() goldendataset.GenerateResource(test).CopyTo(traces.ResourceSpans().AppendEmpty().Resource()) expected := traces.ResourceSpans().At(0).Resource() ocNode, ocResource := internalResourceToOC(expected) - actual := pdata.NewResource() + actual := pcommon.NewResource() ocNodeResourceToInternal(ocNode, ocResource, actual) // Remove opencensus resource type from actual. This will be added during translation. actual.Attributes().Delete(occonventions.AttributeResourceType) assert.Equal(t, expected.Attributes().Len(), actual.Attributes().Len()) - expected.Attributes().Range(func(k string, v pdata.Value) bool { + expected.Attributes().Range(func(k string, v pcommon.Value) bool { a, ok := actual.Attributes().Get(k) assert.True(t, ok) switch v.Type() { - case pdata.ValueTypeInt: + case pcommon.ValueTypeInt: // conventions.AttributeProcessID is special because we preserve the type for this. 
if k == conventions.AttributeProcessPID { assert.Equal(t, v.IntVal(), a.IntVal()) } else { assert.Equal(t, strconv.FormatInt(v.IntVal(), 10), a.StringVal()) } - case pdata.ValueTypeMap, pdata.ValueTypeSlice: + case pcommon.ValueTypeMap, pcommon.ValueTypeSlice: assert.Equal(t, a, a) default: assert.Equal(t, v, a) diff --git a/pkg/translator/opencensus/timestamp.go b/pkg/translator/opencensus/timestamp.go index cf0c6d224394..8f050500344e 100644 --- a/pkg/translator/opencensus/timestamp.go +++ b/pkg/translator/opencensus/timestamp.go @@ -15,12 +15,12 @@ package opencensus // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus" import ( - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" "google.golang.org/protobuf/types/known/timestamppb" ) -// timestampAsTimestampPb converts a pdata.Timestamp to a protobuf known type Timestamp. -func timestampAsTimestampPb(ts pdata.Timestamp) *timestamppb.Timestamp { +// timestampAsTimestampPb converts a pcommon.Timestamp to a protobuf known type Timestamp. +func timestampAsTimestampPb(ts pcommon.Timestamp) *timestamppb.Timestamp { if ts == 0 { return nil } diff --git a/pkg/translator/opencensus/traces_to_oc.go b/pkg/translator/opencensus/traces_to_oc.go index 61d923bc761c..880a007a33c8 100644 --- a/pkg/translator/opencensus/traces_to_oc.go +++ b/pkg/translator/opencensus/traces_to_oc.go @@ -22,8 +22,9 @@ import ( ocresource "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" octrace "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1" "go.opencensus.io/trace" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "google.golang.org/protobuf/types/known/wrapperspb" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/occonventions" @@ -31,9 +32,9 @@ import ( ) // ResourceSpansToOC may be used only by OpenCensus receiver and exporter implementations. -// Deprecated: Use pdata.Traces. +// Deprecated: Use ptrace.Traces. // TODO: move this function to OpenCensus package. 
-func ResourceSpansToOC(rs pdata.ResourceSpans) (*occommon.Node, *ocresource.Resource, []*octrace.Span) { +func ResourceSpansToOC(rs ptrace.ResourceSpans) (*occommon.Node, *ocresource.Resource, []*octrace.Span) { node, resource := internalResourceToOC(rs.Resource()) ilss := rs.ScopeSpans() if ilss.Len() == 0 { @@ -53,7 +54,7 @@ func ResourceSpansToOC(rs pdata.ResourceSpans) (*occommon.Node, *ocresource.Reso return node, resource, ocSpans } -func spanToOC(span pdata.Span) *octrace.Span { +func spanToOC(span ptrace.Span) *octrace.Span { spaps := attributesMapToOCSameProcessAsParentSpan(span.Attributes()) attributes := attributesMapToOCSpanAttributes(span.Attributes(), span.DroppedAttributesCount()) if kindAttr := spanKindToOCAttribute(span.Kind()); kindAttr != nil { @@ -95,7 +96,7 @@ func spanToOC(span pdata.Span) *octrace.Span { } } -func attributesMapToOCSpanAttributes(attributes pdata.Map, droppedCount uint32) *octrace.Span_Attributes { +func attributesMapToOCSpanAttributes(attributes pcommon.Map, droppedCount uint32) *octrace.Span_Attributes { if attributes.Len() == 0 && droppedCount == 0 { return nil } @@ -106,44 +107,44 @@ func attributesMapToOCSpanAttributes(attributes pdata.Map, droppedCount uint32) } } -func attributesMapToOCAttributeMap(attributes pdata.Map) map[string]*octrace.AttributeValue { +func attributesMapToOCAttributeMap(attributes pcommon.Map) map[string]*octrace.AttributeValue { if attributes.Len() == 0 { return nil } ocAttributes := make(map[string]*octrace.AttributeValue, attributes.Len()) - attributes.Range(func(k string, v pdata.Value) bool { + attributes.Range(func(k string, v pcommon.Value) bool { ocAttributes[k] = attributeValueToOC(v) return true }) return ocAttributes } -func attributeValueToOC(attr pdata.Value) *octrace.AttributeValue { +func attributeValueToOC(attr pcommon.Value) *octrace.AttributeValue { a := &octrace.AttributeValue{} switch attr.Type() { - case pdata.ValueTypeString: + case pcommon.ValueTypeString: a.Value = &octrace.AttributeValue_StringValue{ StringValue: stringToTruncatableString(attr.StringVal()), } - case pdata.ValueTypeBool: + case pcommon.ValueTypeBool: a.Value = &octrace.AttributeValue_BoolValue{ BoolValue: attr.BoolVal(), } - case pdata.ValueTypeDouble: + case pcommon.ValueTypeDouble: a.Value = &octrace.AttributeValue_DoubleValue{ DoubleValue: attr.DoubleVal(), } - case pdata.ValueTypeInt: + case pcommon.ValueTypeInt: a.Value = &octrace.AttributeValue_IntValue{ IntValue: attr.IntVal(), } - case pdata.ValueTypeMap: + case pcommon.ValueTypeMap: a.Value = &octrace.AttributeValue_StringValue{ StringValue: stringToTruncatableString(attr.AsString()), } - case pdata.ValueTypeSlice: + case pcommon.ValueTypeSlice: a.Value = &octrace.AttributeValue_StringValue{ StringValue: stringToTruncatableString(attr.AsString()), } @@ -156,18 +157,18 @@ func attributeValueToOC(attr pdata.Value) *octrace.AttributeValue { return a } -func spanKindToOCAttribute(kind pdata.SpanKind) *octrace.AttributeValue { +func spanKindToOCAttribute(kind ptrace.SpanKind) *octrace.AttributeValue { var ocKind tracetranslator.OpenTracingSpanKind switch kind { - case pdata.SpanKindConsumer: + case ptrace.SpanKindConsumer: ocKind = tracetranslator.OpenTracingSpanKindConsumer - case pdata.SpanKindProducer: + case ptrace.SpanKindProducer: ocKind = tracetranslator.OpenTracingSpanKindProducer - case pdata.SpanKindInternal: + case ptrace.SpanKindInternal: ocKind = tracetranslator.OpenTracingSpanKindInternal - case pdata.SpanKindUnspecified: - case pdata.SpanKindServer: // explicitly 
handled as SpanKind - case pdata.SpanKindClient: // explicitly handled as SpanKind + case ptrace.SpanKindUnspecified: + case ptrace.SpanKindServer: // explicitly handled as SpanKind + case ptrace.SpanKindClient: // explicitly handled as SpanKind default: } @@ -188,16 +189,16 @@ func stringAttributeValue(val string) *octrace.AttributeValue { } } -func attributesMapToOCSameProcessAsParentSpan(attr pdata.Map) *wrapperspb.BoolValue { +func attributesMapToOCSameProcessAsParentSpan(attr pcommon.Map) *wrapperspb.BoolValue { val, ok := attr.Get(occonventions.AttributeSameProcessAsParentSpan) - if !ok || val.Type() != pdata.ValueTypeBool { + if !ok || val.Type() != pcommon.ValueTypeBool { return nil } return wrapperspb.Bool(val.BoolVal()) } // OTLP follows the W3C format, e.g. "vendorname1=opaqueValue1,vendorname2=opaqueValue2" -func traceStateToOC(traceState pdata.TraceState) *octrace.Span_Tracestate { +func traceStateToOC(traceState ptrace.TraceState) *octrace.Span_Tracestate { if traceState == "" { return nil } @@ -229,24 +230,24 @@ func traceStateToOC(traceState pdata.TraceState) *octrace.Span_Tracestate { } } -func spanKindToOC(kind pdata.SpanKind) octrace.Span_SpanKind { +func spanKindToOC(kind ptrace.SpanKind) octrace.Span_SpanKind { switch kind { - case pdata.SpanKindServer: + case ptrace.SpanKindServer: return octrace.Span_SERVER - case pdata.SpanKindClient: + case ptrace.SpanKindClient: return octrace.Span_CLIENT // NOTE: see `spanKindToOCAttribute` function for custom kinds - case pdata.SpanKindUnspecified: - case pdata.SpanKindInternal: - case pdata.SpanKindProducer: - case pdata.SpanKindConsumer: + case ptrace.SpanKindUnspecified: + case ptrace.SpanKindInternal: + case ptrace.SpanKindProducer: + case ptrace.SpanKindConsumer: default: } return octrace.Span_SPAN_KIND_UNSPECIFIED } -func eventsToOC(events pdata.SpanEventSlice, droppedCount uint32) *octrace.Span_TimeEvents { +func eventsToOC(events ptrace.SpanEventSlice, droppedCount uint32) *octrace.Span_TimeEvents { if events.Len() == 0 { if droppedCount == 0 { return nil @@ -268,7 +269,7 @@ func eventsToOC(events pdata.SpanEventSlice, droppedCount uint32) *octrace.Span_ } } -func eventToOC(event pdata.SpanEvent) *octrace.Span_TimeEvent { +func eventToOC(event ptrace.SpanEvent) *octrace.Span_TimeEvent { attrs := event.Attributes() // Consider TimeEvent to be of MessageEvent type if all and only relevant attributes are set @@ -280,7 +281,7 @@ func eventToOC(event pdata.SpanEvent) *octrace.Span_TimeEvent { } // TODO: Find a better way to check for message_event. Maybe use the event.Name. 
if attrs.Len() == len(ocMessageEventAttrs) { - ocMessageEventAttrValues := map[string]pdata.Value{} + ocMessageEventAttrValues := map[string]pcommon.Value{} var ocMessageEventAttrFound bool for _, attr := range ocMessageEventAttrs { akv, found := attrs.Get(attr) @@ -318,7 +319,7 @@ func eventToOC(event pdata.SpanEvent) *octrace.Span_TimeEvent { } } -func linksToOC(links pdata.SpanLinkSlice, droppedCount uint32) *octrace.Span_Links { +func linksToOC(links ptrace.SpanLinkSlice, droppedCount uint32) *octrace.Span_Links { if links.Len() == 0 { if droppedCount == 0 { return nil @@ -347,7 +348,7 @@ func linksToOC(links pdata.SpanLinkSlice, droppedCount uint32) *octrace.Span_Lin } } -func traceIDToOC(tid pdata.TraceID) []byte { +func traceIDToOC(tid pcommon.TraceID) []byte { if tid.IsEmpty() { return nil } @@ -355,7 +356,7 @@ func traceIDToOC(tid pdata.TraceID) []byte { return tidBytes[:] } -func spanIDToOC(sid pdata.SpanID) []byte { +func spanIDToOC(sid pcommon.SpanID) []byte { if sid.IsEmpty() { return nil } @@ -363,19 +364,19 @@ func spanIDToOC(sid pdata.SpanID) []byte { return sidBytes[:] } -func statusToOC(status pdata.SpanStatus) (*octrace.Status, *octrace.AttributeValue) { +func statusToOC(status ptrace.SpanStatus) (*octrace.Status, *octrace.AttributeValue) { var attr *octrace.AttributeValue var oc int32 switch status.Code() { - case pdata.StatusCodeUnset: + case ptrace.StatusCodeUnset: // Unset in OTLP corresponds to OK in OpenCensus. oc = trace.StatusCodeOK - case pdata.StatusCodeOk: + case ptrace.StatusCodeOk: // OK in OpenCensus is the closest to OK in OTLP. oc = trace.StatusCodeOK // We will also add an attribute to indicate that it is OTLP OK, different from OTLP Unset. attr = &octrace.AttributeValue{Value: &octrace.AttributeValue_IntValue{IntValue: int64(status.Code())}} - case pdata.StatusCodeError: + case ptrace.StatusCodeError: oc = trace.StatusCodeUnknown } diff --git a/pkg/translator/opencensus/traces_to_oc_test.go b/pkg/translator/opencensus/traces_to_oc_test.go index 1ed7dca058ab..343ec8282e91 100644 --- a/pkg/translator/opencensus/traces_to_oc_test.go +++ b/pkg/translator/opencensus/traces_to_oc_test.go @@ -21,7 +21,8 @@ import ( ocresource "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" octrace "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/types/known/timestamppb" "google.golang.org/protobuf/types/known/wrapperspb" @@ -54,12 +55,12 @@ func TestInternalTraceStateToOC(t *testing.T) { } func TestAttributesMapToOC(t *testing.T) { - assert.EqualValues(t, (*octrace.Span_Attributes)(nil), attributesMapToOCSpanAttributes(pdata.NewMap(), 0)) + assert.EqualValues(t, (*octrace.Span_Attributes)(nil), attributesMapToOCSpanAttributes(pcommon.NewMap(), 0)) ocAttrs := &octrace.Span_Attributes{ DroppedAttributesCount: 123, } - assert.EqualValues(t, ocAttrs, attributesMapToOCSpanAttributes(pdata.NewMap(), 123)) + assert.EqualValues(t, ocAttrs, attributesMapToOCSpanAttributes(pcommon.NewMap(), 123)) ocAttrs = &octrace.Span_Attributes{ AttributeMap: map[string]*octrace.AttributeValue{ @@ -71,7 +72,7 @@ func TestAttributesMapToOC(t *testing.T) { } assert.EqualValues(t, ocAttrs, attributesMapToOCSpanAttributes( - pdata.NewMapFromRaw(map[string]interface{}{ + 
pcommon.NewMapFromRaw(map[string]interface{}{ "abc": "def", }), 234)) @@ -86,7 +87,7 @@ func TestAttributesMapToOC(t *testing.T) { Value: &octrace.AttributeValue_DoubleValue{DoubleValue: 4.5}, } assert.EqualValues(t, ocAttrs, - attributesMapToOCSpanAttributes(pdata.NewMapFromRaw( + attributesMapToOCSpanAttributes(pcommon.NewMapFromRaw( map[string]interface{}{ "abc": "def", "intval": 345, @@ -98,31 +99,31 @@ func TestAttributesMapToOC(t *testing.T) { func TestSpanKindToOC(t *testing.T) { tests := []struct { - kind pdata.SpanKind + kind ptrace.SpanKind ocKind octrace.Span_SpanKind }{ { - kind: pdata.SpanKindClient, + kind: ptrace.SpanKindClient, ocKind: octrace.Span_CLIENT, }, { - kind: pdata.SpanKindServer, + kind: ptrace.SpanKindServer, ocKind: octrace.Span_SERVER, }, { - kind: pdata.SpanKindConsumer, + kind: ptrace.SpanKindConsumer, ocKind: octrace.Span_SPAN_KIND_UNSPECIFIED, }, { - kind: pdata.SpanKindProducer, + kind: ptrace.SpanKindProducer, ocKind: octrace.Span_SPAN_KIND_UNSPECIFIED, }, { - kind: pdata.SpanKindUnspecified, + kind: ptrace.SpanKindUnspecified, ocKind: octrace.Span_SPAN_KIND_UNSPECIFIED, }, { - kind: pdata.SpanKindInternal, + kind: ptrace.SpanKindInternal, ocKind: octrace.Span_SPAN_KIND_UNSPECIFIED, }, } @@ -136,7 +137,7 @@ func TestSpanKindToOC(t *testing.T) { } func TestAttributesMapTOOcSameProcessAsParentSpan(t *testing.T) { - attr := pdata.NewMap() + attr := pcommon.NewMap() assert.Nil(t, attributesMapToOCSameProcessAsParentSpan(attr)) attr.UpsertBool(occonventions.AttributeSameProcessAsParentSpan, true) @@ -151,11 +152,11 @@ func TestAttributesMapTOOcSameProcessAsParentSpan(t *testing.T) { func TestSpanKindToOCAttribute(t *testing.T) { tests := []struct { - kind pdata.SpanKind + kind ptrace.SpanKind ocAttribute *octrace.AttributeValue }{ { - kind: pdata.SpanKindConsumer, + kind: ptrace.SpanKindConsumer, ocAttribute: &octrace.AttributeValue{ Value: &octrace.AttributeValue_StringValue{ StringValue: &octrace.TruncatableString{ @@ -165,7 +166,7 @@ func TestSpanKindToOCAttribute(t *testing.T) { }, }, { - kind: pdata.SpanKindProducer, + kind: ptrace.SpanKindProducer, ocAttribute: &octrace.AttributeValue{ Value: &octrace.AttributeValue_StringValue{ StringValue: &octrace.TruncatableString{ @@ -175,7 +176,7 @@ func TestSpanKindToOCAttribute(t *testing.T) { }, }, { - kind: pdata.SpanKindInternal, + kind: ptrace.SpanKindInternal, ocAttribute: &octrace.AttributeValue{ Value: &octrace.AttributeValue_StringValue{ StringValue: &octrace.TruncatableString{ @@ -185,15 +186,15 @@ func TestSpanKindToOCAttribute(t *testing.T) { }, }, { - kind: pdata.SpanKindUnspecified, + kind: ptrace.SpanKindUnspecified, ocAttribute: nil, }, { - kind: pdata.SpanKindServer, + kind: ptrace.SpanKindServer, ocAttribute: nil, }, { - kind: pdata.SpanKindClient, + kind: ptrace.SpanKindClient, ocAttribute: nil, }, } @@ -289,7 +290,7 @@ func TestInternalToOC(t *testing.T) { tests := []struct { name string - td pdata.Traces + td ptrace.Traces Node *occommon.Node Resource *ocresource.Resource Spans []*octrace.Span diff --git a/pkg/translator/prometheusremotewrite/go.mod b/pkg/translator/prometheusremotewrite/go.mod index 847f6b063085..45760e1e3201 100644 --- a/pkg/translator/prometheusremotewrite/go.mod +++ b/pkg/translator/prometheusremotewrite/go.mod @@ -7,8 +7,9 @@ require ( github.com/prometheus/common v0.33.0 github.com/prometheus/prometheus v1.8.2-0.20220117154355-4855a0c067e2 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - 
go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/multierr v1.8.0 ) @@ -22,3 +23,5 @@ require ( ) replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal => ../../../internal/coreinternal + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/pkg/translator/prometheusremotewrite/go.sum b/pkg/translator/prometheusremotewrite/go.sum index 6255ee14d7d9..928f0a913af5 100644 --- a/pkg/translator/prometheusremotewrite/go.sum +++ b/pkg/translator/prometheusremotewrite/go.sum @@ -1234,10 +1234,12 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= diff --git a/pkg/translator/prometheusremotewrite/helper.go b/pkg/translator/prometheusremotewrite/helper.go index 1ecd0cf07c26..cd4d6fa4c3f9 100644 --- a/pkg/translator/prometheusremotewrite/helper.go +++ b/pkg/translator/prometheusremotewrite/helper.go @@ -29,8 +29,9 @@ import ( "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/prompb" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" ) const ( @@ -158,14 +159,14 @@ func timeSeriesSignature(datatype string, labels *[]prompb.Label) string { // createAttributes creates a slice of Cortex Label with OTLP attributes and pairs of string values. // Unpaired string value is ignored. String pairs overwrites OTLP labels if collision happens, and the overwrite is // logged. 
Resultant label names are sanitized. -func createAttributes(resource pdata.Resource, attributes pdata.Map, externalLabels map[string]string, extras ...string) []prompb.Label { +func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externalLabels map[string]string, extras ...string) []prompb.Label { // map ensures no duplicate label name l := map[string]prompb.Label{} // Ensure attributes are sorted by key for consistent merging of keys which // collide when sanitized. attributes.Sort() - attributes.Range(func(key string, value pdata.Value) bool { + attributes.Range(func(key string, value pcommon.Value) bool { if existingLabel, alreadyExists := l[sanitize(key)]; alreadyExists { existingLabel.Value = existingLabel.Value + ";" + value.AsString() l[sanitize(key)] = existingLabel @@ -237,7 +238,7 @@ func createAttributes(resource pdata.Resource, attributes pdata.Map, externalLab } // getPromMetricName creates a Prometheus metric name by attaching namespace prefix for Monotonic metrics. -func getPromMetricName(metric pdata.Metric, ns string) string { +func getPromMetricName(metric pmetric.Metric, ns string) string { name := metric.Name() if len(ns) > 0 { name = ns + "_" + name @@ -248,15 +249,15 @@ func getPromMetricName(metric pdata.Metric, ns string) string { // validateMetrics returns a bool representing whether the metric has a valid type and temporality combination and a // matching metric type and field -func validateMetrics(metric pdata.Metric) bool { +func validateMetrics(metric pmetric.Metric) bool { switch metric.DataType() { - case pdata.MetricDataTypeGauge: + case pmetric.MetricDataTypeGauge: return metric.Gauge().DataPoints().Len() != 0 - case pdata.MetricDataTypeSum: - return metric.Sum().DataPoints().Len() != 0 && metric.Sum().AggregationTemporality() == pdata.MetricAggregationTemporalityCumulative - case pdata.MetricDataTypeHistogram: - return metric.Histogram().DataPoints().Len() != 0 && metric.Histogram().AggregationTemporality() == pdata.MetricAggregationTemporalityCumulative - case pdata.MetricDataTypeSummary: + case pmetric.MetricDataTypeSum: + return metric.Sum().DataPoints().Len() != 0 && metric.Sum().AggregationTemporality() == pmetric.MetricAggregationTemporalityCumulative + case pmetric.MetricDataTypeHistogram: + return metric.Histogram().DataPoints().Len() != 0 && metric.Histogram().AggregationTemporality() == pmetric.MetricAggregationTemporalityCumulative + case pmetric.MetricDataTypeSummary: return metric.Summary().DataPoints().Len() != 0 } return false @@ -264,7 +265,7 @@ func validateMetrics(metric pdata.Metric) bool { // addSingleNumberDataPoint converts the metric value stored in pt to a Prometheus sample, and add the sample // to its corresponding time series in tsMap -func addSingleNumberDataPoint(pt pdata.NumberDataPoint, resource pdata.Resource, metric pdata.Metric, settings Settings, tsMap map[string]*prompb.TimeSeries) { +func addSingleNumberDataPoint(pt pmetric.NumberDataPoint, resource pcommon.Resource, metric pmetric.Metric, settings Settings, tsMap map[string]*prompb.TimeSeries) { // create parameters for addSample name := getPromMetricName(metric, settings.Namespace) labels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels, nameStr, name) @@ -273,12 +274,12 @@ func addSingleNumberDataPoint(pt pdata.NumberDataPoint, resource pdata.Resource, Timestamp: convertTimeStamp(pt.Timestamp()), } switch pt.ValueType() { - case pdata.MetricValueTypeInt: + case pmetric.MetricValueTypeInt: sample.Value = float64(pt.IntVal()) - 
case pdata.MetricValueTypeDouble: + case pmetric.MetricValueTypeDouble: sample.Value = pt.DoubleVal() } - if pt.Flags().HasFlag(pdata.MetricDataPointFlagNoRecordedValue) { + if pt.Flags().HasFlag(pmetric.MetricDataPointFlagNoRecordedValue) { sample.Value = math.Float64frombits(value.StaleNaN) } addSample(tsMap, sample, labels, metric.DataType().String()) @@ -286,7 +287,7 @@ func addSingleNumberDataPoint(pt pdata.NumberDataPoint, resource pdata.Resource, // addSingleHistogramDataPoint converts pt to 2 + min(len(ExplicitBounds), len(BucketCount)) + 1 samples. It // ignore extra buckets if len(ExplicitBounds) > len(BucketCounts) -func addSingleHistogramDataPoint(pt pdata.HistogramDataPoint, resource pdata.Resource, metric pdata.Metric, settings Settings, tsMap map[string]*prompb.TimeSeries) { +func addSingleHistogramDataPoint(pt pmetric.HistogramDataPoint, resource pcommon.Resource, metric pmetric.Metric, settings Settings, tsMap map[string]*prompb.TimeSeries) { time := convertTimeStamp(pt.Timestamp()) // sum, count, and buckets of the histogram should append suffix to baseName baseName := getPromMetricName(metric, settings.Namespace) @@ -295,7 +296,7 @@ func addSingleHistogramDataPoint(pt pdata.HistogramDataPoint, resource pdata.Res Value: pt.Sum(), Timestamp: time, } - if pt.Flags().HasFlag(pdata.MetricDataPointFlagNoRecordedValue) { + if pt.Flags().HasFlag(pmetric.MetricDataPointFlagNoRecordedValue) { sum.Value = math.Float64frombits(value.StaleNaN) } @@ -307,7 +308,7 @@ func addSingleHistogramDataPoint(pt pdata.HistogramDataPoint, resource pdata.Res Value: float64(pt.Count()), Timestamp: time, } - if pt.Flags().HasFlag(pdata.MetricDataPointFlagNoRecordedValue) { + if pt.Flags().HasFlag(pmetric.MetricDataPointFlagNoRecordedValue) { count.Value = math.Float64frombits(value.StaleNaN) } @@ -331,7 +332,7 @@ func addSingleHistogramDataPoint(pt pdata.HistogramDataPoint, resource pdata.Res Value: float64(cumulativeCount), Timestamp: time, } - if pt.Flags().HasFlag(pdata.MetricDataPointFlagNoRecordedValue) { + if pt.Flags().HasFlag(pmetric.MetricDataPointFlagNoRecordedValue) { bucket.Value = math.Float64frombits(value.StaleNaN) } boundStr := strconv.FormatFloat(bound, 'f', -1, 64) @@ -344,7 +345,7 @@ func addSingleHistogramDataPoint(pt pdata.HistogramDataPoint, resource pdata.Res infBucket := &prompb.Sample{ Timestamp: time, } - if pt.Flags().HasFlag(pdata.MetricDataPointFlagNoRecordedValue) { + if pt.Flags().HasFlag(pmetric.MetricDataPointFlagNoRecordedValue) { infBucket.Value = math.Float64frombits(value.StaleNaN) } else { cumulativeCount += pt.BucketCounts()[len(pt.BucketCounts())-1] @@ -357,7 +358,7 @@ func addSingleHistogramDataPoint(pt pdata.HistogramDataPoint, resource pdata.Res addExemplars(tsMap, promExemplars, bucketBounds) } -func getPromExemplars(pt pdata.HistogramDataPoint) []prompb.Exemplar { +func getPromExemplars(pt pmetric.HistogramDataPoint) []prompb.Exemplar { var promExemplars []prompb.Exemplar for i := 0; i < pt.Exemplars().Len(); i++ { @@ -388,7 +389,7 @@ func getPromExemplars(pt pdata.HistogramDataPoint) []prompb.Exemplar { } var labelsFromAttributes []prompb.Label - exemplar.FilteredAttributes().Range(func(key string, value pdata.Value) bool { + exemplar.FilteredAttributes().Range(func(key string, value pcommon.Value) bool { val := value.AsString() exemplarRunes += utf8.RuneCountInString(key) + utf8.RuneCountInString(val) promLabel := prompb.Label{ @@ -413,26 +414,26 @@ func getPromExemplars(pt pdata.HistogramDataPoint) []prompb.Exemplar { } // mostRecentTimestampInMetric 
returns the latest timestamp in a batch of metrics -func mostRecentTimestampInMetric(metric pdata.Metric) pdata.Timestamp { - var ts pdata.Timestamp +func mostRecentTimestampInMetric(metric pmetric.Metric) pcommon.Timestamp { + var ts pcommon.Timestamp // handle individual metric based on type switch metric.DataType() { - case pdata.MetricDataTypeGauge: + case pmetric.MetricDataTypeGauge: dataPoints := metric.Gauge().DataPoints() for x := 0; x < dataPoints.Len(); x++ { ts = maxTimestamp(ts, dataPoints.At(x).Timestamp()) } - case pdata.MetricDataTypeSum: + case pmetric.MetricDataTypeSum: dataPoints := metric.Sum().DataPoints() for x := 0; x < dataPoints.Len(); x++ { ts = maxTimestamp(ts, dataPoints.At(x).Timestamp()) } - case pdata.MetricDataTypeHistogram: + case pmetric.MetricDataTypeHistogram: dataPoints := metric.Histogram().DataPoints() for x := 0; x < dataPoints.Len(); x++ { ts = maxTimestamp(ts, dataPoints.At(x).Timestamp()) } - case pdata.MetricDataTypeSummary: + case pmetric.MetricDataTypeSummary: dataPoints := metric.Summary().DataPoints() for x := 0; x < dataPoints.Len(); x++ { ts = maxTimestamp(ts, dataPoints.At(x).Timestamp()) @@ -441,7 +442,7 @@ func mostRecentTimestampInMetric(metric pdata.Metric) pdata.Timestamp { return ts } -func maxTimestamp(a, b pdata.Timestamp) pdata.Timestamp { +func maxTimestamp(a, b pcommon.Timestamp) pcommon.Timestamp { if a > b { return a } @@ -449,7 +450,7 @@ func maxTimestamp(a, b pdata.Timestamp) pdata.Timestamp { } // addSingleSummaryDataPoint converts pt to len(QuantileValues) + 2 samples. -func addSingleSummaryDataPoint(pt pdata.SummaryDataPoint, resource pdata.Resource, metric pdata.Metric, settings Settings, +func addSingleSummaryDataPoint(pt pmetric.SummaryDataPoint, resource pcommon.Resource, metric pmetric.Metric, settings Settings, tsMap map[string]*prompb.TimeSeries) { time := convertTimeStamp(pt.Timestamp()) // sum and count of the summary should append suffix to baseName @@ -459,7 +460,7 @@ func addSingleSummaryDataPoint(pt pdata.SummaryDataPoint, resource pdata.Resourc Value: pt.Sum(), Timestamp: time, } - if pt.Flags().HasFlag(pdata.MetricDataPointFlagNoRecordedValue) { + if pt.Flags().HasFlag(pmetric.MetricDataPointFlagNoRecordedValue) { sum.Value = math.Float64frombits(value.StaleNaN) } sumlabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels, nameStr, baseName+sumStr) @@ -470,7 +471,7 @@ func addSingleSummaryDataPoint(pt pdata.SummaryDataPoint, resource pdata.Resourc Value: float64(pt.Count()), Timestamp: time, } - if pt.Flags().HasFlag(pdata.MetricDataPointFlagNoRecordedValue) { + if pt.Flags().HasFlag(pmetric.MetricDataPointFlagNoRecordedValue) { count.Value = math.Float64frombits(value.StaleNaN) } countlabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels, nameStr, baseName+countStr) @@ -483,7 +484,7 @@ func addSingleSummaryDataPoint(pt pdata.SummaryDataPoint, resource pdata.Resourc Value: qt.Value(), Timestamp: time, } - if pt.Flags().HasFlag(pdata.MetricDataPointFlagNoRecordedValue) { + if pt.Flags().HasFlag(pmetric.MetricDataPointFlagNoRecordedValue) { quantile.Value = math.Float64frombits(value.StaleNaN) } percentileStr := strconv.FormatFloat(qt.Quantile(), 'f', -1, 64) @@ -493,7 +494,7 @@ func addSingleSummaryDataPoint(pt pdata.SummaryDataPoint, resource pdata.Resourc } // addResourceTargetInfo converts the resource to the target info metric -func addResourceTargetInfo(resource pdata.Resource, settings Settings, timestamp pdata.Timestamp, tsMap map[string]*prompb.TimeSeries) 
{ +func addResourceTargetInfo(resource pcommon.Resource, settings Settings, timestamp pcommon.Timestamp, tsMap map[string]*prompb.TimeSeries) { if resource.Attributes().Len() == 0 { return } @@ -504,9 +505,9 @@ func addResourceTargetInfo(resource pdata.Resource, settings Settings, timestamp } // Use resource attributes (other than those used for job+instance) as the // metric labels for the target info metric - attributes := pdata.NewMap() + attributes := pcommon.NewMap() resource.Attributes().CopyTo(attributes) - attributes.RemoveIf(func(k string, _ pdata.Value) bool { + attributes.RemoveIf(func(k string, _ pcommon.Value) bool { switch k { case conventions.AttributeServiceName, conventions.AttributeServiceNamespace, conventions.AttributeServiceInstanceID: // Remove resource attributes used for job + instance @@ -555,6 +556,6 @@ func sanitizeRune(r rune) rune { } // convertTimeStamp converts OTLP timestamp in ns to timestamp in ms -func convertTimeStamp(timestamp pdata.Timestamp) int64 { +func convertTimeStamp(timestamp pcommon.Timestamp) int64 { return timestamp.AsTime().UnixNano() / (int64(time.Millisecond) / int64(time.Nanosecond)) } diff --git a/pkg/translator/prometheusremotewrite/helper_test.go b/pkg/translator/prometheusremotewrite/helper_test.go index a5034bfdf226..381f5a8ba4ea 100644 --- a/pkg/translator/prometheusremotewrite/helper_test.go +++ b/pkg/translator/prometheusremotewrite/helper_test.go @@ -22,8 +22,9 @@ import ( "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/prompb" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/testdata" ) @@ -35,7 +36,7 @@ func Test_validateMetrics(t *testing.T) { // define a single test type combTest struct { name string - metric pdata.Metric + metric pmetric.Metric want bool } @@ -77,7 +78,7 @@ func Test_validateMetrics(t *testing.T) { // case. 
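// Illustrative sketch (not part of this patch): after the pdata split, callers of the
// prometheusremotewrite helpers above build the metric structure with pmetric and the
// attributes/timestamps with pcommon. The metric name and values below are made up for
// the example; imports assumed: "time", pcommon, pmetric (as in this file).
func buildExampleGauge() pmetric.Metrics {
	md := pmetric.NewMetrics()
	m := md.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics().AppendEmpty()
	m.SetName("example_gauge")
	m.SetDataType(pmetric.MetricDataTypeGauge)

	dp := m.Gauge().DataPoints().AppendEmpty()
	dp.SetDoubleVal(0.42)                                     // number data points now live in pmetric
	dp.SetTimestamp(pcommon.NewTimestampFromTime(time.Now())) // pcommon.Timestamp replaces pdata.Timestamp
	dp.Attributes().UpsertString("env", "prod")               // pcommon.Map replaces pdata.Map
	return md
}
// The resulting pmetric.Metrics can then be handed to FromMetrics(md, Settings{...}),
// as shown in metrics_to_prw.go later in this patch.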
func Test_addSample(t *testing.T) { type testCase struct { - metric pdata.Metric + metric pmetric.Metric sample prompb.Sample labels []prompb.Label } @@ -141,7 +142,7 @@ func Test_timeSeriesSignature(t *testing.T) { tests := []struct { name string lbs []prompb.Label - metric pdata.Metric + metric pmetric.Metric want string }{ { @@ -184,15 +185,15 @@ func Test_timeSeriesSignature(t *testing.T) { func Test_createLabelSet(t *testing.T) { tests := []struct { name string - resource pdata.Resource - orig pdata.Map + resource pcommon.Resource + orig pcommon.Map externalLabels map[string]string extras []string want []prompb.Label }{ { "labels_clean", - getResource(map[string]pdata.Value{}), + getResource(map[string]pcommon.Value{}), lbs1, map[string]string{}, []string{label31, value31, label32, value32}, @@ -200,9 +201,9 @@ func Test_createLabelSet(t *testing.T) { }, { "labels_with_resource", - getResource(map[string]pdata.Value{ - "service.name": pdata.NewValueString("prometheus"), - "service.instance.id": pdata.NewValueString("127.0.0.1:8080"), + getResource(map[string]pcommon.Value{ + "service.name": pcommon.NewValueString("prometheus"), + "service.instance.id": pcommon.NewValueString("127.0.0.1:8080"), }), lbs1, map[string]string{}, @@ -211,9 +212,9 @@ func Test_createLabelSet(t *testing.T) { }, { "labels_with_nonstring_resource", - getResource(map[string]pdata.Value{ - "service.name": pdata.NewValueInt(12345), - "service.instance.id": pdata.NewValueBool(true), + getResource(map[string]pcommon.Value{ + "service.name": pcommon.NewValueInt(12345), + "service.instance.id": pcommon.NewValueBool(true), }), lbs1, map[string]string{}, @@ -222,7 +223,7 @@ func Test_createLabelSet(t *testing.T) { }, { "labels_duplicate_in_extras", - getResource(map[string]pdata.Value{}), + getResource(map[string]pcommon.Value{}), lbs1, map[string]string{}, []string{label11, value31}, @@ -230,7 +231,7 @@ func Test_createLabelSet(t *testing.T) { }, { "labels_dirty", - getResource(map[string]pdata.Value{}), + getResource(map[string]pcommon.Value{}), lbs1Dirty, map[string]string{}, []string{label31 + dirty1, value31, label32, value32}, @@ -238,15 +239,15 @@ func Test_createLabelSet(t *testing.T) { }, { "no_original_case", - getResource(map[string]pdata.Value{}), - pdata.NewMap(), + getResource(map[string]pcommon.Value{}), + pcommon.NewMap(), nil, []string{label31, value31, label32, value32}, getPromLabels(label31, value31, label32, value32), }, { "empty_extra_case", - getResource(map[string]pdata.Value{}), + getResource(map[string]pcommon.Value{}), lbs1, map[string]string{}, []string{"", ""}, @@ -254,7 +255,7 @@ func Test_createLabelSet(t *testing.T) { }, { "single_left_over_case", - getResource(map[string]pdata.Value{}), + getResource(map[string]pcommon.Value{}), lbs1, map[string]string{}, []string{label31, value31, label32}, @@ -262,7 +263,7 @@ func Test_createLabelSet(t *testing.T) { }, { "valid_external_labels", - getResource(map[string]pdata.Value{}), + getResource(map[string]pcommon.Value{}), lbs1, exlbs1, []string{label31, value31, label32, value32}, @@ -270,7 +271,7 @@ func Test_createLabelSet(t *testing.T) { }, { "overwritten_external_labels", - getResource(map[string]pdata.Value{}), + getResource(map[string]pcommon.Value{}), lbs1, exlbs2, []string{label31, value31, label32, value32}, @@ -278,7 +279,7 @@ func Test_createLabelSet(t *testing.T) { }, { "colliding attributes", - getResource(map[string]pdata.Value{}), + getResource(map[string]pcommon.Value{}), lbsColliding, nil, []string{label31, value31, label32, 
value32}, @@ -299,7 +300,7 @@ func Test_createLabelSet(t *testing.T) { func Test_getPromMetricName(t *testing.T) { tests := []struct { name string - metric pdata.Metric + metric pmetric.Metric ns string want string }{ @@ -422,7 +423,7 @@ func Test_getPromExemplars(t *testing.T) { tnow := time.Now() tests := []struct { name string - histogram *pdata.HistogramDataPoint + histogram *pmetric.HistogramDataPoint expected []prompb.Exemplar }{ { @@ -501,18 +502,18 @@ func TestAddResourceTargetInfo(t *testing.T) { conventions.AttributeServiceInstanceID: "service-instance-id", "resource_attr": "resource-attr-val-1", } - resourceWithServiceAttrs := pdata.NewResource() - pdata.NewMapFromRaw(resourceAttrMap).CopyTo(resourceWithServiceAttrs.Attributes()) + resourceWithServiceAttrs := pcommon.NewResource() + pcommon.NewMapFromRaw(resourceAttrMap).CopyTo(resourceWithServiceAttrs.Attributes()) for _, tc := range []struct { desc string - resource pdata.Resource + resource pcommon.Resource settings Settings - timestamp pdata.Timestamp + timestamp pcommon.Timestamp expected map[string]*prompb.TimeSeries }{ { desc: "empty resource", - resource: pdata.NewResource(), + resource: pcommon.NewResource(), expected: map[string]*prompb.TimeSeries{}, }, { @@ -609,19 +610,19 @@ func TestAddResourceTargetInfo(t *testing.T) { } func TestMostRecentTimestampInMetric(t *testing.T) { - laterTimestamp := pdata.NewTimestampFromTime(testdata.TestMetricTime.Add(1 * time.Minute)) + laterTimestamp := pcommon.NewTimestampFromTime(testdata.TestMetricTime.Add(1 * time.Minute)) metricMultipleTimestamps := testdata.GenerateMetricsOneMetric().ResourceMetrics().At(0).InstrumentationLibraryMetrics().At(0).Metrics().At(0) // the first datapoint timestamp is at testdata.TestMetricTime metricMultipleTimestamps.Sum().DataPoints().At(1).SetTimestamp(laterTimestamp) for _, tc := range []struct { desc string - input pdata.Metric - expected pdata.Timestamp + input pmetric.Metric + expected pcommon.Timestamp }{ { desc: "empty", - input: pdata.NewMetric(), - expected: pdata.Timestamp(0), + input: pmetric.NewMetric(), + expected: pcommon.Timestamp(0), }, { desc: "multiple timestamps", diff --git a/pkg/translator/prometheusremotewrite/metrics_to_prw.go b/pkg/translator/prometheusremotewrite/metrics_to_prw.go index 80899f59cca3..9d8644ec5eeb 100644 --- a/pkg/translator/prometheusremotewrite/metrics_to_prw.go +++ b/pkg/translator/prometheusremotewrite/metrics_to_prw.go @@ -20,12 +20,13 @@ import ( "github.com/prometheus/prometheus/prompb" "go.opentelemetry.io/collector/consumer/consumererror" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/multierr" ) // Deprecated: [0.45.0] use `prometheusremotewrite.FromMetrics`. It does not wrap the error as `NewPermanent`. -func MetricsToPRW(namespace string, externalLabels map[string]string, md pdata.Metrics) (map[string]*prompb.TimeSeries, int, error) { +func MetricsToPRW(namespace string, externalLabels map[string]string, md pmetric.Metrics) (map[string]*prompb.TimeSeries, int, error) { tsMap, err := FromMetrics(md, Settings{Namespace: namespace, ExternalLabels: externalLabels}) if err != nil { err = consumererror.NewPermanent(err) @@ -38,8 +39,8 @@ type Settings struct { ExternalLabels map[string]string } -// FromMetrics converts pdata.Metrics to prometheus remote write format. 
-func FromMetrics(md pdata.Metrics, settings Settings) (tsMap map[string]*prompb.TimeSeries, errs error) { +// FromMetrics converts pmetric.Metrics to prometheus remote write format. +func FromMetrics(md pmetric.Metrics, settings Settings) (tsMap map[string]*prompb.TimeSeries, errs error) { tsMap = make(map[string]*prompb.TimeSeries) resourceMetricsSlice := md.ResourceMetrics() @@ -49,7 +50,7 @@ func FromMetrics(md pdata.Metrics, settings Settings) (tsMap map[string]*prompb. scopeMetricsSlice := resourceMetrics.ScopeMetrics() // keep track of the most recent timestamp in the ResourceMetrics for // use with the "target" info metric - var mostRecentTimestamp pdata.Timestamp + var mostRecentTimestamp pcommon.Timestamp for j := 0; j < scopeMetricsSlice.Len(); j++ { scopeMetrics := scopeMetricsSlice.At(j) metricSlice := scopeMetrics.Metrics() @@ -67,18 +68,18 @@ func FromMetrics(md pdata.Metrics, settings Settings) (tsMap map[string]*prompb. // handle individual metric based on type switch metric.DataType() { - case pdata.MetricDataTypeGauge: + case pmetric.MetricDataTypeGauge: dataPoints := metric.Gauge().DataPoints() if err := addNumberDataPointSlice(dataPoints, resource, metric, settings, tsMap); err != nil { errs = multierr.Append(errs, err) } - case pdata.MetricDataTypeSum: + case pmetric.MetricDataTypeSum: dataPoints := metric.Sum().DataPoints() if err := addNumberDataPointSlice(dataPoints, resource, metric, settings, tsMap); err != nil { errs = multierr.Append(errs, err) } - case pdata.MetricDataTypeHistogram: + case pmetric.MetricDataTypeHistogram: dataPoints := metric.Histogram().DataPoints() if dataPoints.Len() == 0 { errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name())) @@ -86,7 +87,7 @@ func FromMetrics(md pdata.Metrics, settings Settings) (tsMap map[string]*prompb. for x := 0; x < dataPoints.Len(); x++ { addSingleHistogramDataPoint(dataPoints.At(x), resource, metric, settings, tsMap) } - case pdata.MetricDataTypeSummary: + case pmetric.MetricDataTypeSummary: dataPoints := metric.Summary().DataPoints() if dataPoints.Len() == 0 { errs = multierr.Append(errs, fmt.Errorf("empty data points. %s is dropped", metric.Name())) @@ -105,8 +106,8 @@ func FromMetrics(md pdata.Metrics, settings Settings) (tsMap map[string]*prompb. return } -func addNumberDataPointSlice(dataPoints pdata.NumberDataPointSlice, - resource pdata.Resource, metric pdata.Metric, +func addNumberDataPointSlice(dataPoints pmetric.NumberDataPointSlice, + resource pcommon.Resource, metric pmetric.Metric, settings Settings, tsMap map[string]*prompb.TimeSeries) error { if dataPoints.Len() == 0 { return fmt.Errorf("empty data points. 
%s is dropped", metric.Name()) diff --git a/pkg/translator/prometheusremotewrite/testutils_test.go b/pkg/translator/prometheusremotewrite/testutils_test.go index 3127208c89ed..d01aa7cc1522 100644 --- a/pkg/translator/prometheusremotewrite/testutils_test.go +++ b/pkg/translator/prometheusremotewrite/testutils_test.go @@ -23,7 +23,8 @@ import ( "github.com/prometheus/prometheus/prompb" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" ) var ( @@ -124,7 +125,7 @@ var ( unmatchedBoundBucketHist = "unmatchedBoundBucketHist" // valid metrics as input should not return error - validMetrics1 = map[string]pdata.Metric{ + validMetrics1 = map[string]pmetric.Metric{ validIntGauge: getIntGaugeMetric(validIntGauge, lbs1, intVal1, time1), validDoubleGauge: getDoubleGaugeMetric(validDoubleGauge, lbs1, floatVal1, time1), validIntSum: getIntSumMetric(validIntSum, lbs1, intVal1, time1), @@ -133,7 +134,7 @@ var ( validHistogram: getHistogramMetric(validHistogram, lbs1, time1, floatVal1, uint64(intVal1), bounds, buckets), validSummary: getSummaryMetric(validSummary, lbs1, time1, floatVal1, uint64(intVal1), quantiles), } - validMetrics2 = map[string]pdata.Metric{ + validMetrics2 = map[string]pmetric.Metric{ validIntGauge: getIntGaugeMetric(validIntGauge, lbs2, intVal2, time2), validDoubleGauge: getDoubleGaugeMetric(validDoubleGauge, lbs2, floatVal2, time2), validIntSum: getIntSumMetric(validIntSum, lbs2, intVal2, time2), @@ -141,7 +142,7 @@ var ( validHistogram: getHistogramMetric(validHistogram, lbs2, time2, floatVal2, uint64(intVal2), bounds, buckets), validSummary: getSummaryMetric(validSummary, lbs2, time2, floatVal2, uint64(intVal2), quantiles), validIntGaugeDirty: getIntGaugeMetric(validIntGaugeDirty, lbs1, intVal1, time1), - unmatchedBoundBucketHist: getHistogramMetric(unmatchedBoundBucketHist, pdata.NewMap(), 0, 0, 0, []float64{0.1, 0.2, 0.3}, []uint64{1, 2}), + unmatchedBoundBucketHist: getHistogramMetric(unmatchedBoundBucketHist, pcommon.NewMap(), 0, 0, 0, []float64{0.1, 0.2, 0.3}, []uint64{1, 2}), } empty = "empty" @@ -157,8 +158,8 @@ var ( emptyCumulativeHistogram = "emptyCumulativeHistogram" // different metrics that will not pass validate metrics and will cause the exporter to return an error - invalidMetrics = map[string]pdata.Metric{ - empty: pdata.NewMetric(), + invalidMetrics = map[string]pmetric.Metric{ + empty: pmetric.NewMetric(), emptyGauge: getEmptyGaugeMetric(emptyGauge), emptySum: getEmptySumMetric(emptySum), emptyHistogram: getEmptyHistogramMetric(emptyHistogram), @@ -170,8 +171,8 @@ var ( // OTLP metrics // attributes must come in pairs -func getAttributes(labels ...string) pdata.Map { - attributeMap := pdata.NewMap() +func getAttributes(labels ...string) pcommon.Map { + attributeMap := pcommon.NewMap() for i := 0; i < len(labels); i += 2 { attributeMap.UpsertString(labels[i], labels[i+1]) } @@ -226,40 +227,40 @@ func getTimeSeriesWithSamplesAndExemplars(labels []prompb.Label, samples []promp } } -func getHistogramDataPointWithExemplars(t *testing.T, time time.Time, value float64, traceID string, spanID string, attributeKey string, attributeValue string) *pdata.HistogramDataPoint { - h := pdata.NewHistogramDataPoint() +func getHistogramDataPointWithExemplars(t *testing.T, time time.Time, value float64, traceID string, spanID string, attributeKey string, attributeValue string) *pmetric.HistogramDataPoint { + h := 
pmetric.NewHistogramDataPoint() e := h.Exemplars().AppendEmpty() e.SetDoubleVal(value) - e.SetTimestamp(pdata.NewTimestampFromTime(time)) - e.FilteredAttributes().Insert(attributeKey, pdata.NewValueString(attributeValue)) + e.SetTimestamp(pcommon.NewTimestampFromTime(time)) + e.FilteredAttributes().Insert(attributeKey, pcommon.NewValueString(attributeValue)) if traceID != "" { var traceIDBytes [16]byte traceIDBytesSlice, err := hex.DecodeString(traceID) require.NoErrorf(t, err, "error decoding trace id: %v", err) copy(traceIDBytes[:], traceIDBytesSlice) - e.SetTraceID(pdata.NewTraceID(traceIDBytes)) + e.SetTraceID(pcommon.NewTraceID(traceIDBytes)) } if spanID != "" { var spanIDBytes [8]byte spanIDBytesSlice, err := hex.DecodeString(spanID) require.NoErrorf(t, err, "error decoding span id: %v", err) copy(spanIDBytes[:], spanIDBytesSlice) - e.SetSpanID(pdata.NewSpanID(spanIDBytes)) + e.SetSpanID(pcommon.NewSpanID(spanIDBytes)) } return &h } -func getHistogramDataPoint() *pdata.HistogramDataPoint { - h := pdata.NewHistogramDataPoint() +func getHistogramDataPoint() *pmetric.HistogramDataPoint { + h := pmetric.NewHistogramDataPoint() return &h } -func getQuantiles(bounds []float64, values []float64) pdata.ValueAtQuantileSlice { - quantiles := pdata.NewValueAtQuantileSlice() +func getQuantiles(bounds []float64, values []float64) pmetric.ValueAtQuantileSlice { + quantiles := pmetric.NewValueAtQuantileSlice() quantiles.EnsureCapacity(len(bounds)) for i := 0; i < len(bounds); i++ { @@ -271,17 +272,17 @@ func getQuantiles(bounds []float64, values []float64) pdata.ValueAtQuantileSlice return quantiles } -func getEmptyGaugeMetric(name string) pdata.Metric { - metric := pdata.NewMetric() +func getEmptyGaugeMetric(name string) pmetric.Metric { + metric := pmetric.NewMetric() metric.SetName(name) - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) return metric } -func getIntGaugeMetric(name string, attributes pdata.Map, value int64, ts uint64) pdata.Metric { - metric := pdata.NewMetric() +func getIntGaugeMetric(name string, attributes pcommon.Map, value int64, ts uint64) pmetric.Metric { + metric := pmetric.NewMetric() metric.SetName(name) - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) dp := metric.Gauge().DataPoints().AppendEmpty() if strings.HasPrefix(name, "staleNaN") { dp.SetFlags(1) @@ -289,15 +290,15 @@ func getIntGaugeMetric(name string, attributes pdata.Map, value int64, ts uint64 dp.SetIntVal(value) attributes.CopyTo(dp.Attributes()) - dp.SetStartTimestamp(pdata.Timestamp(0)) - dp.SetTimestamp(pdata.Timestamp(ts)) + dp.SetStartTimestamp(pcommon.Timestamp(0)) + dp.SetTimestamp(pcommon.Timestamp(ts)) return metric } -func getDoubleGaugeMetric(name string, attributes pdata.Map, value float64, ts uint64) pdata.Metric { - metric := pdata.NewMetric() +func getDoubleGaugeMetric(name string, attributes pcommon.Map, value float64, ts uint64) pmetric.Metric { + metric := pmetric.NewMetric() metric.SetName(name) - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) dp := metric.Gauge().DataPoints().AppendEmpty() if strings.HasPrefix(name, "staleNaN") { dp.SetFlags(1) @@ -305,23 +306,23 @@ func getDoubleGaugeMetric(name string, attributes pdata.Map, value float64, ts u dp.SetDoubleVal(value) attributes.CopyTo(dp.Attributes()) - dp.SetStartTimestamp(pdata.Timestamp(0)) - dp.SetTimestamp(pdata.Timestamp(ts)) + dp.SetStartTimestamp(pcommon.Timestamp(0)) + 
dp.SetTimestamp(pcommon.Timestamp(ts)) return metric } -func getEmptySumMetric(name string) pdata.Metric { - metric := pdata.NewMetric() +func getEmptySumMetric(name string) pmetric.Metric { + metric := pmetric.NewMetric() metric.SetName(name) - metric.SetDataType(pdata.MetricDataTypeSum) + metric.SetDataType(pmetric.MetricDataTypeSum) return metric } -func getIntSumMetric(name string, attributes pdata.Map, value int64, ts uint64) pdata.Metric { - metric := pdata.NewMetric() +func getIntSumMetric(name string, attributes pcommon.Map, value int64, ts uint64) pmetric.Metric { + metric := pmetric.NewMetric() metric.SetName(name) - metric.SetDataType(pdata.MetricDataTypeSum) - metric.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + metric.SetDataType(pmetric.MetricDataTypeSum) + metric.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) dp := metric.Sum().DataPoints().AppendEmpty() if strings.HasPrefix(name, "staleNaN") { dp.SetFlags(1) @@ -329,24 +330,24 @@ func getIntSumMetric(name string, attributes pdata.Map, value int64, ts uint64) dp.SetIntVal(value) attributes.CopyTo(dp.Attributes()) - dp.SetStartTimestamp(pdata.Timestamp(0)) - dp.SetTimestamp(pdata.Timestamp(ts)) + dp.SetStartTimestamp(pcommon.Timestamp(0)) + dp.SetTimestamp(pcommon.Timestamp(ts)) return metric } -func getEmptyCumulativeSumMetric(name string) pdata.Metric { - metric := pdata.NewMetric() +func getEmptyCumulativeSumMetric(name string) pmetric.Metric { + metric := pmetric.NewMetric() metric.SetName(name) - metric.SetDataType(pdata.MetricDataTypeSum) - metric.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + metric.SetDataType(pmetric.MetricDataTypeSum) + metric.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) return metric } -func getSumMetric(name string, attributes pdata.Map, value float64, ts uint64) pdata.Metric { - metric := pdata.NewMetric() +func getSumMetric(name string, attributes pcommon.Map, value float64, ts uint64) pmetric.Metric { + metric := pmetric.NewMetric() metric.SetName(name) - metric.SetDataType(pdata.MetricDataTypeSum) - metric.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + metric.SetDataType(pmetric.MetricDataTypeSum) + metric.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) dp := metric.Sum().DataPoints().AppendEmpty() if strings.HasPrefix(name, "staleNaN") { dp.SetFlags(1) @@ -354,31 +355,31 @@ func getSumMetric(name string, attributes pdata.Map, value float64, ts uint64) p dp.SetDoubleVal(value) attributes.CopyTo(dp.Attributes()) - dp.SetStartTimestamp(pdata.Timestamp(0)) - dp.SetTimestamp(pdata.Timestamp(ts)) + dp.SetStartTimestamp(pcommon.Timestamp(0)) + dp.SetTimestamp(pcommon.Timestamp(ts)) return metric } -func getEmptyHistogramMetric(name string) pdata.Metric { - metric := pdata.NewMetric() +func getEmptyHistogramMetric(name string) pmetric.Metric { + metric := pmetric.NewMetric() metric.SetName(name) - metric.SetDataType(pdata.MetricDataTypeHistogram) + metric.SetDataType(pmetric.MetricDataTypeHistogram) return metric } -func getEmptyCumulativeHistogramMetric(name string) pdata.Metric { - metric := pdata.NewMetric() +func getEmptyCumulativeHistogramMetric(name string) pmetric.Metric { + metric := pmetric.NewMetric() metric.SetName(name) - metric.SetDataType(pdata.MetricDataTypeHistogram) - metric.Histogram().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + 
metric.SetDataType(pmetric.MetricDataTypeHistogram) + metric.Histogram().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) return metric } -func getHistogramMetric(name string, attributes pdata.Map, ts uint64, sum float64, count uint64, bounds []float64, buckets []uint64) pdata.Metric { - metric := pdata.NewMetric() +func getHistogramMetric(name string, attributes pcommon.Map, ts uint64, sum float64, count uint64, bounds []float64, buckets []uint64) pmetric.Metric { + metric := pmetric.NewMetric() metric.SetName(name) - metric.SetDataType(pdata.MetricDataTypeHistogram) - metric.Histogram().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + metric.SetDataType(pmetric.MetricDataTypeHistogram) + metric.Histogram().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) dp := metric.Histogram().DataPoints().AppendEmpty() if strings.HasPrefix(name, "staleNaN") { dp.SetFlags(1) @@ -389,33 +390,33 @@ func getHistogramMetric(name string, attributes pdata.Map, ts uint64, sum float6 dp.SetExplicitBounds(bounds) attributes.CopyTo(dp.Attributes()) - dp.SetTimestamp(pdata.Timestamp(ts)) + dp.SetTimestamp(pcommon.Timestamp(ts)) return metric } -func getEmptySummaryMetric(name string) pdata.Metric { - metric := pdata.NewMetric() +func getEmptySummaryMetric(name string) pmetric.Metric { + metric := pmetric.NewMetric() metric.SetName(name) - metric.SetDataType(pdata.MetricDataTypeSummary) + metric.SetDataType(pmetric.MetricDataTypeSummary) return metric } -func getSummaryMetric(name string, attributes pdata.Map, ts uint64, sum float64, count uint64, quantiles pdata.ValueAtQuantileSlice) pdata.Metric { - metric := pdata.NewMetric() +func getSummaryMetric(name string, attributes pcommon.Map, ts uint64, sum float64, count uint64, quantiles pmetric.ValueAtQuantileSlice) pmetric.Metric { + metric := pmetric.NewMetric() metric.SetName(name) - metric.SetDataType(pdata.MetricDataTypeSummary) + metric.SetDataType(pmetric.MetricDataTypeSummary) dp := metric.Summary().DataPoints().AppendEmpty() if strings.HasPrefix(name, "staleNaN") { dp.SetFlags(1) } dp.SetCount(count) dp.SetSum(sum) - attributes.Range(func(k string, v pdata.Value) bool { + attributes.Range(func(k string, v pcommon.Value) bool { dp.Attributes().Upsert(k, v) return true }) - dp.SetTimestamp(pdata.Timestamp(ts)) + dp.SetTimestamp(pcommon.Timestamp(ts)) quantiles.CopyTo(dp.QuantileValues()) quantiles.At(0).Quantile() @@ -423,8 +424,8 @@ func getSummaryMetric(name string, attributes pdata.Map, ts uint64, sum float64, return metric } -func getResource(resources map[string]pdata.Value) pdata.Resource { - resource := pdata.NewResource() +func getResource(resources map[string]pcommon.Value) pcommon.Resource { + resource := pcommon.NewResource() for k, v := range resources { resource.Attributes().Upsert(k, v) diff --git a/pkg/translator/signalfx/from_metrics.go b/pkg/translator/signalfx/from_metrics.go index 02b6a2102cf0..8c1e5e2dcbae 100644 --- a/pkg/translator/signalfx/from_metrics.go +++ b/pkg/translator/signalfx/from_metrics.go @@ -19,7 +19,8 @@ import ( "strconv" sfxpb "github.com/signalfx/com_signalfx_metrics_protobuf/model" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" ) // Some fields on SignalFx protobuf are pointers, in order to reduce @@ -40,8 +41,8 @@ var ( infinityBoundSFxDimValue = float64ToDimValue(math.Inf(1)) ) -// FromMetrics converts pdata.Metrics to SignalFx proto data 
points. -func FromMetrics(md pdata.Metrics) ([]*sfxpb.DataPoint, error) { +// FromMetrics converts pmetric.Metrics to SignalFx proto data points. +func FromMetrics(md pmetric.Metrics) ([]*sfxpb.DataPoint, error) { var sfxDataPoints []*sfxpb.DataPoint rms := md.ResourceMetrics() @@ -60,9 +61,9 @@ func FromMetrics(md pdata.Metrics) ([]*sfxpb.DataPoint, error) { return sfxDataPoints, nil } -// FromMetric converts pdata.Metric to SignalFx proto data points. +// FromMetric converts pmetric.Metric to SignalFx proto data points. // TODO: Remove this and change signalfxexporter to us FromMetrics. -func FromMetric(m pdata.Metric, extraDimensions []*sfxpb.Dimension) []*sfxpb.DataPoint { +func FromMetric(m pmetric.Metric, extraDimensions []*sfxpb.Dimension) []*sfxpb.DataPoint { var dps []*sfxpb.DataPoint basePoint := &sfxpb.DataPoint{ @@ -71,35 +72,35 @@ func FromMetric(m pdata.Metric, extraDimensions []*sfxpb.Dimension) []*sfxpb.Dat } switch m.DataType() { - case pdata.MetricDataTypeGauge: + case pmetric.MetricDataTypeGauge: dps = convertNumberDataPoints(m.Gauge().DataPoints(), basePoint, extraDimensions) - case pdata.MetricDataTypeSum: + case pmetric.MetricDataTypeSum: dps = convertNumberDataPoints(m.Sum().DataPoints(), basePoint, extraDimensions) - case pdata.MetricDataTypeHistogram: + case pmetric.MetricDataTypeHistogram: dps = convertHistogram(m.Histogram().DataPoints(), basePoint, extraDimensions) - case pdata.MetricDataTypeSummary: + case pmetric.MetricDataTypeSummary: dps = convertSummaryDataPoints(m.Summary().DataPoints(), m.Name(), extraDimensions) } return dps } -func fromMetricTypeToMetricType(metric pdata.Metric) *sfxpb.MetricType { +func fromMetricTypeToMetricType(metric pmetric.Metric) *sfxpb.MetricType { switch metric.DataType() { - case pdata.MetricDataTypeGauge: + case pmetric.MetricDataTypeGauge: return &sfxMetricTypeGauge - case pdata.MetricDataTypeSum: + case pmetric.MetricDataTypeSum: if !metric.Sum().IsMonotonic() { return &sfxMetricTypeGauge } - if metric.Sum().AggregationTemporality() == pdata.MetricAggregationTemporalityDelta { + if metric.Sum().AggregationTemporality() == pmetric.MetricAggregationTemporalityDelta { return &sfxMetricTypeCounter } return &sfxMetricTypeCumulativeCounter - case pdata.MetricDataTypeHistogram: - if metric.Histogram().AggregationTemporality() == pdata.MetricAggregationTemporalityDelta { + case pmetric.MetricDataTypeHistogram: + if metric.Histogram().AggregationTemporality() == pmetric.MetricAggregationTemporalityDelta { return &sfxMetricTypeCounter } return &sfxMetricTypeCumulativeCounter @@ -108,7 +109,7 @@ func fromMetricTypeToMetricType(metric pdata.Metric) *sfxpb.MetricType { return nil } -func convertNumberDataPoints(in pdata.NumberDataPointSlice, basePoint *sfxpb.DataPoint, extraDims []*sfxpb.Dimension) []*sfxpb.DataPoint { +func convertNumberDataPoints(in pmetric.NumberDataPointSlice, basePoint *sfxpb.DataPoint, extraDims []*sfxpb.Dimension) []*sfxpb.DataPoint { out := make([]*sfxpb.DataPoint, 0, in.Len()) for i := 0; i < in.Len(); i++ { @@ -119,10 +120,10 @@ func convertNumberDataPoints(in pdata.NumberDataPointSlice, basePoint *sfxpb.Dat dp.Dimensions = attributesToDimensions(inDp.Attributes(), extraDims) switch inDp.ValueType() { - case pdata.MetricValueTypeInt: + case pmetric.MetricValueTypeInt: val := inDp.IntVal() dp.Value.IntValue = &val - case pdata.MetricValueTypeDouble: + case pmetric.MetricValueTypeDouble: val := inDp.DoubleVal() dp.Value.DoubleValue = &val } @@ -132,7 +133,7 @@ func convertNumberDataPoints(in 
pdata.NumberDataPointSlice, basePoint *sfxpb.Dat return out } -func convertHistogram(histDPs pdata.HistogramDataPointSlice, basePoint *sfxpb.DataPoint, extraDims []*sfxpb.Dimension) []*sfxpb.DataPoint { +func convertHistogram(histDPs pmetric.HistogramDataPointSlice, basePoint *sfxpb.DataPoint, extraDims []*sfxpb.Dimension) []*sfxpb.DataPoint { var out []*sfxpb.DataPoint for i := 0; i < histDPs.Len(); i++ { @@ -188,7 +189,7 @@ func convertHistogram(histDPs pdata.HistogramDataPointSlice, basePoint *sfxpb.Da } func convertSummaryDataPoints( - in pdata.SummaryDataPointSlice, + in pmetric.SummaryDataPointSlice, name string, extraDims []*sfxpb.Dimension, ) []*sfxpb.DataPoint { @@ -241,7 +242,7 @@ func convertSummaryDataPoints( return out } -func attributesToDimensions(attributes pdata.Map, extraDims []*sfxpb.Dimension) []*sfxpb.Dimension { +func attributesToDimensions(attributes pcommon.Map, extraDims []*sfxpb.Dimension) []*sfxpb.Dimension { dimensions := make([]*sfxpb.Dimension, len(extraDims), attributes.Len()+len(extraDims)) copy(dimensions, extraDims) if attributes.Len() == 0 { @@ -249,7 +250,7 @@ func attributesToDimensions(attributes pdata.Map, extraDims []*sfxpb.Dimension) } dimensionsValue := make([]sfxpb.Dimension, attributes.Len()) pos := 0 - attributes.Range(func(k string, v pdata.Value) bool { + attributes.Range(func(k string, v pcommon.Value) bool { dimensionsValue[pos].Key = k dimensionsValue[pos].Value = v.AsString() dimensions = append(dimensions, &dimensionsValue[pos]) diff --git a/pkg/translator/signalfx/from_metrics_test.go b/pkg/translator/signalfx/from_metrics_test.go index 4e0880e2a5d9..4fbb0bd1c5e9 100644 --- a/pkg/translator/signalfx/from_metrics_test.go +++ b/pkg/translator/signalfx/from_metrics_test.go @@ -23,7 +23,8 @@ import ( sfxpb "github.com/signalfx/com_signalfx_metrics_protobuf/model" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/maps" ) @@ -40,57 +41,57 @@ func Test_FromMetrics(t *testing.T) { "k1": "v1", } - ts := pdata.NewTimestampFromTime(time.Unix(unixSecs, unixNSecs)) + ts := pcommon.NewTimestampFromTime(time.Unix(unixSecs, unixNSecs)) const doubleVal = 1234.5678 - initDoublePt := func(doublePt pdata.NumberDataPoint) { + initDoublePt := func(doublePt pmetric.NumberDataPoint) { doublePt.SetTimestamp(ts) doublePt.SetDoubleVal(doubleVal) } - initDoublePtWithLabels := func(doublePtWithLabels pdata.NumberDataPoint) { + initDoublePtWithLabels := func(doublePtWithLabels pmetric.NumberDataPoint) { initDoublePt(doublePtWithLabels) - pdata.NewMapFromRaw(labelMap).CopyTo(doublePtWithLabels.Attributes()) + pcommon.NewMapFromRaw(labelMap).CopyTo(doublePtWithLabels.Attributes()) } const int64Val = int64(123) - initInt64Pt := func(int64Pt pdata.NumberDataPoint) { + initInt64Pt := func(int64Pt pmetric.NumberDataPoint) { int64Pt.SetTimestamp(ts) int64Pt.SetIntVal(int64Val) } - initInt64PtWithLabels := func(int64PtWithLabels pdata.NumberDataPoint) { + initInt64PtWithLabels := func(int64PtWithLabels pmetric.NumberDataPoint) { initInt64Pt(int64PtWithLabels) - pdata.NewMapFromRaw(labelMap).CopyTo(int64PtWithLabels.Attributes()) + pcommon.NewMapFromRaw(labelMap).CopyTo(int64PtWithLabels.Attributes()) } histBounds := []float64{1, 2, 4} histCounts := []uint64{4, 2, 3, 7} - initHistDP := func(histDP 
pdata.HistogramDataPoint) { + initHistDP := func(histDP pmetric.HistogramDataPoint) { histDP.SetTimestamp(ts) histDP.SetCount(16) histDP.SetSum(100.0) histDP.SetExplicitBounds(histBounds) histDP.SetBucketCounts(histCounts) - pdata.NewMapFromRaw(labelMap).CopyTo(histDP.Attributes()) + pcommon.NewMapFromRaw(labelMap).CopyTo(histDP.Attributes()) } - histDP := pdata.NewHistogramDataPoint() + histDP := pmetric.NewHistogramDataPoint() initHistDP(histDP) - initHistDPNoBuckets := func(histDP pdata.HistogramDataPoint) { + initHistDPNoBuckets := func(histDP pmetric.HistogramDataPoint) { histDP.SetCount(2) histDP.SetSum(10) histDP.SetTimestamp(ts) - pdata.NewMapFromRaw(labelMap).CopyTo(histDP.Attributes()) + pcommon.NewMapFromRaw(labelMap).CopyTo(histDP.Attributes()) } - histDPNoBuckets := pdata.NewHistogramDataPoint() + histDPNoBuckets := pmetric.NewHistogramDataPoint() initHistDPNoBuckets(histDPNoBuckets) const summarySumVal = 123.4 const summaryCountVal = 111 - initSummaryDP := func(summaryDP pdata.SummaryDataPoint) { + initSummaryDP := func(summaryDP pmetric.SummaryDataPoint) { summaryDP.SetTimestamp(ts) summaryDP.SetSum(summarySumVal) summaryDP.SetCount(summaryCountVal) @@ -100,82 +101,82 @@ func Test_FromMetrics(t *testing.T) { qv.SetQuantile(0.25 * float64(i+1)) qv.SetValue(float64(i)) } - pdata.NewMapFromRaw(labelMap).CopyTo(summaryDP.Attributes()) + pcommon.NewMapFromRaw(labelMap).CopyTo(summaryDP.Attributes()) } - initEmptySummaryDP := func(summaryDP pdata.SummaryDataPoint) { + initEmptySummaryDP := func(summaryDP pmetric.SummaryDataPoint) { summaryDP.SetTimestamp(ts) summaryDP.SetSum(summarySumVal) summaryDP.SetCount(summaryCountVal) - pdata.NewMapFromRaw(labelMap).CopyTo(summaryDP.Attributes()) + pcommon.NewMapFromRaw(labelMap).CopyTo(summaryDP.Attributes()) } tests := []struct { name string - metricsFn func() pdata.Metrics + metricsFn func() pmetric.Metrics wantSfxDataPoints []*sfxpb.DataPoint }{ { name: "nil_node_nil_resources_no_dims", - metricsFn: func() pdata.Metrics { - out := pdata.NewMetrics() + metricsFn: func() pmetric.Metrics { + out := pmetric.NewMetrics() ilm := out.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty() { m := ilm.Metrics().AppendEmpty() m.SetName("gauge_double_with_dims") - m.SetDataType(pdata.MetricDataTypeGauge) + m.SetDataType(pmetric.MetricDataTypeGauge) initDoublePt(m.Gauge().DataPoints().AppendEmpty()) } { m := ilm.Metrics().AppendEmpty() m.SetName("gauge_int_with_dims") - m.SetDataType(pdata.MetricDataTypeGauge) + m.SetDataType(pmetric.MetricDataTypeGauge) initInt64Pt(m.Gauge().DataPoints().AppendEmpty()) } { m := ilm.Metrics().AppendEmpty() m.SetName("cumulative_double_with_dims") - m.SetDataType(pdata.MetricDataTypeSum) + m.SetDataType(pmetric.MetricDataTypeSum) m.Sum().SetIsMonotonic(true) - m.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) initDoublePt(m.Sum().DataPoints().AppendEmpty()) } { m := ilm.Metrics().AppendEmpty() m.SetName("cumulative_int_with_dims") - m.SetDataType(pdata.MetricDataTypeSum) + m.SetDataType(pmetric.MetricDataTypeSum) m.Sum().SetIsMonotonic(true) - m.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) initInt64Pt(m.Sum().DataPoints().AppendEmpty()) } { m := ilm.Metrics().AppendEmpty() m.SetName("delta_double_with_dims") - m.SetDataType(pdata.MetricDataTypeSum) + 
m.SetDataType(pmetric.MetricDataTypeSum) m.Sum().SetIsMonotonic(true) - m.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityDelta) + m.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityDelta) initDoublePt(m.Sum().DataPoints().AppendEmpty()) } { m := ilm.Metrics().AppendEmpty() m.SetName("delta_int_with_dims") - m.SetDataType(pdata.MetricDataTypeSum) + m.SetDataType(pmetric.MetricDataTypeSum) m.Sum().SetIsMonotonic(true) - m.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityDelta) + m.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityDelta) initInt64Pt(m.Sum().DataPoints().AppendEmpty()) } { m := ilm.Metrics().AppendEmpty() m.SetName("gauge_sum_double_with_dims") - m.SetDataType(pdata.MetricDataTypeSum) + m.SetDataType(pmetric.MetricDataTypeSum) m.Sum().SetIsMonotonic(false) initDoublePt(m.Sum().DataPoints().AppendEmpty()) } { m := ilm.Metrics().AppendEmpty() m.SetName("gauge_sum_int_with_dims") - m.SetDataType(pdata.MetricDataTypeSum) + m.SetDataType(pmetric.MetricDataTypeSum) m.Sum().SetIsMonotonic(false) initInt64Pt(m.Sum().DataPoints().AppendEmpty()) } @@ -195,33 +196,33 @@ func Test_FromMetrics(t *testing.T) { }, { name: "nil_node_and_resources_with_dims", - metricsFn: func() pdata.Metrics { - out := pdata.NewMetrics() + metricsFn: func() pmetric.Metrics { + out := pmetric.NewMetrics() ilm := out.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty() { m := ilm.Metrics().AppendEmpty() m.SetName("gauge_double_with_dims") - m.SetDataType(pdata.MetricDataTypeGauge) + m.SetDataType(pmetric.MetricDataTypeGauge) initDoublePtWithLabels(m.Gauge().DataPoints().AppendEmpty()) } { m := ilm.Metrics().AppendEmpty() m.SetName("gauge_int_with_dims") - m.SetDataType(pdata.MetricDataTypeGauge) + m.SetDataType(pmetric.MetricDataTypeGauge) initInt64PtWithLabels(m.Gauge().DataPoints().AppendEmpty()) } { m := ilm.Metrics().AppendEmpty() m.SetName("cumulative_double_with_dims") - m.SetDataType(pdata.MetricDataTypeSum) + m.SetDataType(pmetric.MetricDataTypeSum) m.Sum().SetIsMonotonic(true) initDoublePtWithLabels(m.Sum().DataPoints().AppendEmpty()) } { m := ilm.Metrics().AppendEmpty() m.SetName("cumulative_int_with_dims") - m.SetDataType(pdata.MetricDataTypeSum) + m.SetDataType(pmetric.MetricDataTypeSum) m.Sum().SetIsMonotonic(true) initInt64PtWithLabels(m.Sum().DataPoints().AppendEmpty()) } @@ -237,8 +238,8 @@ func Test_FromMetrics(t *testing.T) { }, { name: "with_node_resources_dims", - metricsFn: func() pdata.Metrics { - out := pdata.NewMetrics() + metricsFn: func() pmetric.Metrics { + out := pmetric.NewMetrics() rm := out.ResourceMetrics().AppendEmpty() res := rm.Resource() res.Attributes().InsertString("k_r0", "v_r0") @@ -252,13 +253,13 @@ func Test_FromMetrics(t *testing.T) { { m := ilm.Metrics().AppendEmpty() m.SetName("gauge_double_with_dims") - m.SetDataType(pdata.MetricDataTypeGauge) + m.SetDataType(pmetric.MetricDataTypeGauge) initDoublePtWithLabels(m.Gauge().DataPoints().AppendEmpty()) } { m := ilm.Metrics().AppendEmpty() m.SetName("gauge_int_with_dims") - m.SetDataType(pdata.MetricDataTypeGauge) + m.SetDataType(pmetric.MetricDataTypeGauge) initInt64PtWithLabels(m.Gauge().DataPoints().AppendEmpty()) } @@ -289,20 +290,20 @@ func Test_FromMetrics(t *testing.T) { }, { name: "histograms", - metricsFn: func() pdata.Metrics { - out := pdata.NewMetrics() + metricsFn: func() pmetric.Metrics { + out := pmetric.NewMetrics() ilm := out.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty() { m := ilm.Metrics().AppendEmpty() 
m.SetName("double_histo") - m.SetDataType(pdata.MetricDataTypeHistogram) + m.SetDataType(pmetric.MetricDataTypeHistogram) initHistDP(m.Histogram().DataPoints().AppendEmpty()) } { m := ilm.Metrics().AppendEmpty() m.SetName("double_delta_histo") - m.SetDataType(pdata.MetricDataTypeHistogram) - m.Histogram().SetAggregationTemporality(pdata.MetricAggregationTemporalityDelta) + m.SetDataType(pmetric.MetricDataTypeHistogram) + m.Histogram().SetAggregationTemporality(pmetric.MetricAggregationTemporalityDelta) initHistDP(m.Histogram().DataPoints().AppendEmpty()) } return out @@ -314,12 +315,12 @@ func Test_FromMetrics(t *testing.T) { }, { name: "distribution_no_buckets", - metricsFn: func() pdata.Metrics { - out := pdata.NewMetrics() + metricsFn: func() pmetric.Metrics { + out := pmetric.NewMetrics() ilm := out.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty() m := ilm.Metrics().AppendEmpty() m.SetName("no_bucket_histo") - m.SetDataType(pdata.MetricDataTypeHistogram) + m.SetDataType(pmetric.MetricDataTypeHistogram) initHistDPNoBuckets(m.Histogram().DataPoints().AppendEmpty()) return out @@ -328,12 +329,12 @@ func Test_FromMetrics(t *testing.T) { }, { name: "summaries", - metricsFn: func() pdata.Metrics { - out := pdata.NewMetrics() + metricsFn: func() pmetric.Metrics { + out := pmetric.NewMetrics() ilm := out.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty() m := ilm.Metrics().AppendEmpty() m.SetName("summary") - m.SetDataType(pdata.MetricDataTypeSummary) + m.SetDataType(pmetric.MetricDataTypeSummary) initSummaryDP(m.Summary().DataPoints().AppendEmpty()) return out @@ -342,12 +343,12 @@ func Test_FromMetrics(t *testing.T) { }, { name: "empty_summary", - metricsFn: func() pdata.Metrics { - out := pdata.NewMetrics() + metricsFn: func() pmetric.Metrics { + out := pmetric.NewMetrics() ilm := out.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty() m := ilm.Metrics().AppendEmpty() m.SetName("empty_summary") - m.SetDataType(pdata.MetricDataTypeSummary) + m.SetDataType(pmetric.MetricDataTypeSummary) initEmptySummaryDP(m.Summary().DataPoints().AppendEmpty()) return out @@ -425,7 +426,7 @@ func sfxDimensions(m map[string]interface{}) []*sfxpb.Dimension { func expectedFromHistogram( metricName string, dims map[string]interface{}, - histDP pdata.HistogramDataPoint, + histDP pmetric.HistogramDataPoint, isDelta bool, ) []*sfxpb.DataPoint { buckets := histDP.BucketCounts() diff --git a/pkg/translator/signalfx/go.mod b/pkg/translator/signalfx/go.mod index 8683e8abb052..5edd4162aadc 100644 --- a/pkg/translator/signalfx/go.mod +++ b/pkg/translator/signalfx/go.mod @@ -6,7 +6,7 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.48.0 github.com/signalfx/com_signalfx_metrics_protobuf v0.0.3 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/multierr v1.8.0 ) @@ -22,3 +22,5 @@ require ( ) replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/common => ../../../internal/common + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/pkg/translator/signalfx/go.sum b/pkg/translator/signalfx/go.sum index 90ce7975fe62..c99976909982 100644 --- a/pkg/translator/signalfx/go.sum +++ b/pkg/translator/signalfx/go.sum @@ -35,8 +35,8 @@ github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMT 
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= diff --git a/pkg/translator/signalfx/timestamp.go b/pkg/translator/signalfx/timestamp.go index 44a5ecb67be6..d189b18aca68 100644 --- a/pkg/translator/signalfx/timestamp.go +++ b/pkg/translator/signalfx/timestamp.go @@ -14,18 +14,16 @@ package signalfx // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/signalfx" -import ( - "go.opentelemetry.io/collector/model/pdata" -) +import "go.opentelemetry.io/collector/pdata/pcommon" const millisToNanos = 1e6 -func fromTimestamp(ts pdata.Timestamp) int64 { +func fromTimestamp(ts pcommon.Timestamp) int64 { // Convert nanos to millis. return int64(ts) / millisToNanos } -func toTimestamp(ts int64) pdata.Timestamp { +func toTimestamp(ts int64) pcommon.Timestamp { // Convert millis to nanos. - return pdata.Timestamp(ts * millisToNanos) + return pcommon.Timestamp(ts * millisToNanos) } diff --git a/pkg/translator/signalfx/to_metrics.go b/pkg/translator/signalfx/to_metrics.go index 61889354e9c9..c27b05184415 100644 --- a/pkg/translator/signalfx/to_metrics.go +++ b/pkg/translator/signalfx/to_metrics.go @@ -18,15 +18,16 @@ import ( "fmt" "github.com/signalfx/com_signalfx_metrics_protobuf/model" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/multierr" ) -// ToMetrics converts SignalFx proto data points to pdata.Metrics. -func ToMetrics(sfxDataPoints []*model.DataPoint) (pdata.Metrics, error) { +// ToMetrics converts SignalFx proto data points to pmetric.Metrics. +func ToMetrics(sfxDataPoints []*model.DataPoint) (pmetric.Metrics, error) { // TODO: not optimized at all, basically regenerating everything for each // data point. 
- md := pdata.NewMetrics() + md := pmetric.NewMetrics() rm := md.ResourceMetrics().AppendEmpty() ilm := rm.ScopeMetrics().AppendEmpty() @@ -45,7 +46,7 @@ func ToMetrics(sfxDataPoints []*model.DataPoint) (pdata.Metrics, error) { return md, err } -func setDataTypeAndPoints(sfxDataPoint *model.DataPoint, ms pdata.MetricSlice) error { +func setDataTypeAndPoints(sfxDataPoint *model.DataPoint, ms pmetric.MetricSlice) error { // Combine metric type with the actual data point type sfxMetricType := sfxDataPoint.GetMetricType() sfxDatum := sfxDataPoint.Value @@ -53,25 +54,25 @@ func setDataTypeAndPoints(sfxDataPoint *model.DataPoint, ms pdata.MetricSlice) e return fmt.Errorf("nil datum value for data-point in metric %q", sfxDataPoint.GetMetric()) } - var m pdata.Metric + var m pmetric.Metric switch sfxMetricType { case model.MetricType_GAUGE: m = ms.AppendEmpty() // Numerical: Periodic, instantaneous measurement of some state. - m.SetDataType(pdata.MetricDataTypeGauge) + m.SetDataType(pmetric.MetricDataTypeGauge) fillNumberDataPoint(sfxDataPoint, m.Gauge().DataPoints()) case model.MetricType_COUNTER: m = ms.AppendEmpty() - m.SetDataType(pdata.MetricDataTypeSum) - m.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityDelta) + m.SetDataType(pmetric.MetricDataTypeSum) + m.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityDelta) m.Sum().SetIsMonotonic(true) fillNumberDataPoint(sfxDataPoint, m.Sum().DataPoints()) case model.MetricType_CUMULATIVE_COUNTER: m = ms.AppendEmpty() - m.SetDataType(pdata.MetricDataTypeSum) - m.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.SetDataType(pmetric.MetricDataTypeSum) + m.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.Sum().SetIsMonotonic(true) fillNumberDataPoint(sfxDataPoint, m.Sum().DataPoints()) @@ -82,7 +83,7 @@ func setDataTypeAndPoints(sfxDataPoint *model.DataPoint, ms pdata.MetricSlice) e return nil } -func fillNumberDataPoint(sfxDataPoint *model.DataPoint, dps pdata.NumberDataPointSlice) { +func fillNumberDataPoint(sfxDataPoint *model.DataPoint, dps pmetric.NumberDataPointSlice) { dp := dps.AppendEmpty() dp.SetTimestamp(toTimestamp(sfxDataPoint.GetTimestamp())) switch { @@ -96,7 +97,7 @@ func fillNumberDataPoint(sfxDataPoint *model.DataPoint, dps pdata.NumberDataPoin func fillInAttributes( dimensions []*model.Dimension, - attributes pdata.Map, + attributes pcommon.Map, ) { attributes.Clear() attributes.EnsureCapacity(len(dimensions)) diff --git a/pkg/translator/signalfx/to_metrics_test.go b/pkg/translator/signalfx/to_metrics_test.go index 3b1b119bb502..88e5187ad2ec 100644 --- a/pkg/translator/signalfx/to_metrics_test.go +++ b/pkg/translator/signalfx/to_metrics_test.go @@ -21,7 +21,8 @@ import ( sfxpb "github.com/signalfx/com_signalfx_metrics_protobuf/model" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" ) func Test_ToMetrics(t *testing.T) { @@ -39,8 +40,8 @@ func Test_ToMetrics(t *testing.T) { } } - buildDefaultMetrics := func(typ pdata.MetricDataType, value interface{}) pdata.Metrics { - out := pdata.NewMetrics() + buildDefaultMetrics := func(typ pmetric.MetricDataType, value interface{}) pmetric.Metrics { + out := pmetric.NewMetrics() rm := out.ResourceMetrics().AppendEmpty() ilm := rm.ScopeMetrics().AppendEmpty() m := ilm.Metrics().AppendEmpty() @@ -48,13 +49,13 @@ func Test_ToMetrics(t *testing.T) { 
m.SetDataType(typ) m.SetName("single") - var dps pdata.NumberDataPointSlice + var dps pmetric.NumberDataPointSlice switch typ { - case pdata.MetricDataTypeGauge: + case pmetric.MetricDataTypeGauge: dps = m.Gauge().DataPoints() - case pdata.MetricDataTypeSum: - m.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + case pmetric.MetricDataTypeSum: + m.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) dps = m.Sum().DataPoints() } @@ -64,7 +65,7 @@ func Test_ToMetrics(t *testing.T) { dp.Attributes().InsertString("k2", "v2") dp.Attributes().Sort() - dp.SetTimestamp(pdata.NewTimestampFromTime(now.Truncate(time.Millisecond))) + dp.SetTimestamp(pcommon.NewTimestampFromTime(now.Truncate(time.Millisecond))) switch val := value.(type) { case int: @@ -79,13 +80,13 @@ func Test_ToMetrics(t *testing.T) { tests := []struct { name string sfxDataPoints []*sfxpb.DataPoint - wantMetrics pdata.Metrics + wantMetrics pmetric.Metrics wantError bool }{ { name: "int_gauge", sfxDataPoints: []*sfxpb.DataPoint{buildDefaulstSFxDataPt()}, - wantMetrics: buildDefaultMetrics(pdata.MetricDataTypeGauge, 13), + wantMetrics: buildDefaultMetrics(pmetric.MetricDataTypeGauge, 13), }, { name: "double_gauge", @@ -97,7 +98,7 @@ func Test_ToMetrics(t *testing.T) { } return []*sfxpb.DataPoint{pt} }(), - wantMetrics: buildDefaultMetrics(pdata.MetricDataTypeGauge, 13.13), + wantMetrics: buildDefaultMetrics(pmetric.MetricDataTypeGauge, 13.13), }, { name: "int_counter", @@ -106,10 +107,10 @@ func Test_ToMetrics(t *testing.T) { pt.MetricType = sfxTypePtr(sfxpb.MetricType_COUNTER) return []*sfxpb.DataPoint{pt} }(), - wantMetrics: func() pdata.Metrics { - m := buildDefaultMetrics(pdata.MetricDataTypeSum, 13) + wantMetrics: func() pmetric.Metrics { + m := buildDefaultMetrics(pmetric.MetricDataTypeSum, 13) d := m.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum() - d.SetAggregationTemporality(pdata.MetricAggregationTemporalityDelta) + d.SetAggregationTemporality(pmetric.MetricAggregationTemporalityDelta) d.SetIsMonotonic(true) return m }(), @@ -124,10 +125,10 @@ func Test_ToMetrics(t *testing.T) { } return []*sfxpb.DataPoint{pt} }(), - wantMetrics: func() pdata.Metrics { - m := buildDefaultMetrics(pdata.MetricDataTypeSum, 13.13) + wantMetrics: func() pmetric.Metrics { + m := buildDefaultMetrics(pmetric.MetricDataTypeSum, 13.13) d := m.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum() - d.SetAggregationTemporality(pdata.MetricAggregationTemporalityDelta) + d.SetAggregationTemporality(pmetric.MetricAggregationTemporalityDelta) d.SetIsMonotonic(true) return m }(), @@ -139,8 +140,8 @@ func Test_ToMetrics(t *testing.T) { pt.Timestamp = 0 return []*sfxpb.DataPoint{pt} }(), - wantMetrics: func() pdata.Metrics { - md := buildDefaultMetrics(pdata.MetricDataTypeGauge, 13) + wantMetrics: func() pmetric.Metrics { + md := buildDefaultMetrics(pmetric.MetricDataTypeGauge, 13) md.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Gauge().DataPoints().At(0).SetTimestamp(0) return md }(), @@ -152,8 +153,8 @@ func Test_ToMetrics(t *testing.T) { pt.Dimensions[0].Value = "" return []*sfxpb.DataPoint{pt} }(), - wantMetrics: func() pdata.Metrics { - md := buildDefaultMetrics(pdata.MetricDataTypeGauge, 13) + wantMetrics: func() pmetric.Metrics { + md := buildDefaultMetrics(pmetric.MetricDataTypeGauge, 13) md.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Gauge().DataPoints().At(0).Attributes().UpdateString("k0", "") return md }(), @@ -170,12 +171,12 @@ 
func Test_ToMetrics(t *testing.T) { pt.Dimensions = dimensions return []*sfxpb.DataPoint{pt} }(), - wantMetrics: buildDefaultMetrics(pdata.MetricDataTypeGauge, 13), + wantMetrics: buildDefaultMetrics(pmetric.MetricDataTypeGauge, 13), }, { name: "nil_datapoint_ignored", sfxDataPoints: []*sfxpb.DataPoint{nil, buildDefaulstSFxDataPt(), nil}, - wantMetrics: buildDefaultMetrics(pdata.MetricDataTypeGauge, 13), + wantMetrics: buildDefaultMetrics(pmetric.MetricDataTypeGauge, 13), }, { name: "drop_inconsistent_datapoints", @@ -198,7 +199,7 @@ func Test_ToMetrics(t *testing.T) { return []*sfxpb.DataPoint{pt0, buildDefaulstSFxDataPt(), pt1, pt2, pt3} }(), - wantMetrics: buildDefaultMetrics(pdata.MetricDataTypeGauge, 13), + wantMetrics: buildDefaultMetrics(pmetric.MetricDataTypeGauge, 13), wantError: true, }, } diff --git a/pkg/translator/zipkin/go.mod b/pkg/translator/zipkin/go.mod index 35578642bc17..400bfc163976 100644 --- a/pkg/translator/zipkin/go.mod +++ b/pkg/translator/zipkin/go.mod @@ -10,7 +10,8 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus v0.48.0 github.com/openzipkin/zipkin-go v0.4.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 google.golang.org/protobuf v1.28.0 ) @@ -37,3 +38,5 @@ require ( replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal => ../../../internal/coreinternal replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus => ../../../pkg/translator/opencensus + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/pkg/translator/zipkin/go.sum b/pkg/translator/zipkin/go.sum index cca73a195aa4..9fd08c524d2a 100644 --- a/pkg/translator/zipkin/go.sum +++ b/pkg/translator/zipkin/go.sum @@ -146,8 +146,10 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= diff --git a/pkg/translator/zipkin/internal/zipkin/attributes.go b/pkg/translator/zipkin/internal/zipkin/attributes.go index 0c8e0eb7edac..123b745039a1 100644 --- 
a/pkg/translator/zipkin/internal/zipkin/attributes.go +++ b/pkg/translator/zipkin/internal/zipkin/attributes.go @@ -17,7 +17,7 @@ package zipkin // import "github.com/open-telemetry/opentelemetry-collector-cont import ( "regexp" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" ) // These constants are the attribute keys used when translating from zipkin @@ -28,20 +28,20 @@ const ( ) var attrValDescriptions = []*attrValDescript{ - constructAttrValDescript("^$", pdata.ValueTypeEmpty), - constructAttrValDescript(`^-?\d+$`, pdata.ValueTypeInt), - constructAttrValDescript(`^-?\d+\.\d+$`, pdata.ValueTypeDouble), - constructAttrValDescript(`^(true|false)$`, pdata.ValueTypeBool), - constructAttrValDescript(`^\{"\w+":.+\}$`, pdata.ValueTypeMap), - constructAttrValDescript(`^\[.*\]$`, pdata.ValueTypeSlice), + constructAttrValDescript("^$", pcommon.ValueTypeEmpty), + constructAttrValDescript(`^-?\d+$`, pcommon.ValueTypeInt), + constructAttrValDescript(`^-?\d+\.\d+$`, pcommon.ValueTypeDouble), + constructAttrValDescript(`^(true|false)$`, pcommon.ValueTypeBool), + constructAttrValDescript(`^\{"\w+":.+\}$`, pcommon.ValueTypeMap), + constructAttrValDescript(`^\[.*\]$`, pcommon.ValueTypeSlice), } type attrValDescript struct { regex *regexp.Regexp - attrType pdata.ValueType + attrType pcommon.ValueType } -func constructAttrValDescript(regex string, attrType pdata.ValueType) *attrValDescript { +func constructAttrValDescript(regex string, attrType pcommon.ValueType) *attrValDescript { regexc := regexp.MustCompile(regex) return &attrValDescript{ regex: regexc, @@ -50,11 +50,11 @@ func constructAttrValDescript(regex string, attrType pdata.ValueType) *attrValDe } // DetermineValueType returns the native OTLP attribute type the string translates to. -func DetermineValueType(value string) pdata.ValueType { +func DetermineValueType(value string) pcommon.ValueType { for _, desc := range attrValDescriptions { if desc.regex.MatchString(value) { return desc.attrType } } - return pdata.ValueTypeString + return pcommon.ValueTypeString } diff --git a/pkg/translator/zipkin/zipkinv1/json.go b/pkg/translator/zipkin/zipkinv1/json.go index a6f516941d23..cccda031d967 100644 --- a/pkg/translator/zipkin/zipkinv1/json.go +++ b/pkg/translator/zipkin/zipkinv1/json.go @@ -25,7 +25,8 @@ import ( commonpb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1" tracepb "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1" "github.com/jaegertracing/jaeger/thrift-gen/zipkincore" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "google.golang.org/protobuf/types/known/timestamppb" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/idutils" @@ -54,16 +55,16 @@ type jsonUnmarshaler struct { } // UnmarshalTraces from JSON bytes. -func (j jsonUnmarshaler) UnmarshalTraces(buf []byte) (pdata.Traces, error) { +func (j jsonUnmarshaler) UnmarshalTraces(buf []byte) (ptrace.Traces, error) { tds, err := v1JSONBatchToOCProto(buf, j.ParseStringTags) if err != nil { - return pdata.Traces{}, err + return ptrace.Traces{}, err } return toTraces(tds) } // NewJSONTracesUnmarshaler returns an unmarshaler for Zipkin JSON. 
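The hunk that continues below changes this constructor to return `ptrace.Unmarshaler`. A minimal caller-side sketch (not part of this patch); the JSON payload is a hypothetical Zipkin v1 batch used only for illustration.

```go
package main

import (
	"fmt"

	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin/zipkinv1"
)

func main() {
	// Hypothetical Zipkin v1 JSON batch with a single minimal span.
	buf := []byte(`[{"traceId":"0102030405060708","id":"0102030405060708","name":"get",
		"timestamp":1596911098294000,"duration":1000}]`)

	// parseStringTags=true asks the translator to coerce string tag values
	// into typed attributes where possible.
	u := zipkinv1.NewJSONTracesUnmarshaler(true)
	td, err := u.UnmarshalTraces(buf)
	if err != nil {
		fmt.Println("unmarshal error:", err)
		return
	}
	fmt.Println("spans:", td.SpanCount())
}
```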
-func NewJSONTracesUnmarshaler(parseStringTags bool) pdata.TracesUnmarshaler { +func NewJSONTracesUnmarshaler(parseStringTags bool) ptrace.Unmarshaler { return jsonUnmarshaler{ParseStringTags: parseStringTags} } @@ -275,13 +276,13 @@ func parseAnnotationValue(value string, parseStringTags bool) *tracepb.Attribute if parseStringTags { switch zipkin.DetermineValueType(value) { - case pdata.ValueTypeInt: + case pcommon.ValueTypeInt: iValue, _ := strconv.ParseInt(value, 10, 64) pbAttrib.Value = &tracepb.AttributeValue_IntValue{IntValue: iValue} - case pdata.ValueTypeDouble: + case pcommon.ValueTypeDouble: fValue, _ := strconv.ParseFloat(value, 64) pbAttrib.Value = &tracepb.AttributeValue_DoubleValue{DoubleValue: fValue} - case pdata.ValueTypeBool: + case pcommon.ValueTypeBool: bValue, _ := strconv.ParseBool(value) pbAttrib.Value = &tracepb.AttributeValue_BoolValue{BoolValue: bValue} default: diff --git a/pkg/translator/zipkin/zipkinv1/thrift.go b/pkg/translator/zipkin/zipkinv1/thrift.go index 04f30d4bccc3..46c991da91a1 100644 --- a/pkg/translator/zipkin/zipkinv1/thrift.go +++ b/pkg/translator/zipkin/zipkinv1/thrift.go @@ -26,7 +26,7 @@ import ( tracepb "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1" jaegerzipkin "github.com/jaegertracing/jaeger/model/converter/thrift/zipkin" "github.com/jaegertracing/jaeger/thrift-gen/zipkincore" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/ptrace" "google.golang.org/protobuf/types/known/timestamppb" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/idutils" @@ -35,20 +35,20 @@ import ( type thriftUnmarshaler struct{} // UnmarshalTraces from Thrift bytes. -func (t thriftUnmarshaler) UnmarshalTraces(buf []byte) (pdata.Traces, error) { +func (t thriftUnmarshaler) UnmarshalTraces(buf []byte) (ptrace.Traces, error) { spans, err := jaegerzipkin.DeserializeThrift(buf) if err != nil { - return pdata.Traces{}, err + return ptrace.Traces{}, err } tds, err := v1ThriftBatchToOCProto(spans) if err != nil { - return pdata.Traces{}, err + return ptrace.Traces{}, err } return toTraces(tds) } // NewThriftTracesUnmarshaler returns an unmarshaler for Zipkin Thrift. 
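The `parseAnnotationValue` change above (and the matching switch in `to_translator.go` later in this patch) now keys off `pcommon.ValueType` constants. A standalone sketch (not part of this patch) of that tag-coercion pattern; `coerceTag` is a hypothetical helper, and the value type would normally come from `zipkin.DetermineValueType`.

```go
package main

import (
	"fmt"
	"strconv"

	"go.opentelemetry.io/collector/pdata/pcommon"
)

// coerceTag upserts a string tag into the attribute map as int, double, bool
// or string, depending on the detected value type.
func coerceTag(dest pcommon.Map, key, val string, valueType pcommon.ValueType) {
	switch valueType {
	case pcommon.ValueTypeInt:
		i, _ := strconv.ParseInt(val, 10, 64)
		dest.UpsertInt(key, i)
	case pcommon.ValueTypeDouble:
		f, _ := strconv.ParseFloat(val, 64)
		dest.UpsertDouble(key, f)
	case pcommon.ValueTypeBool:
		b, _ := strconv.ParseBool(val)
		dest.UpsertBool(key, b)
	default:
		dest.UpsertString(key, val)
	}
}

func main() {
	attrs := pcommon.NewMap()
	coerceTag(attrs, "retry.count", "3", pcommon.ValueTypeInt)
	coerceTag(attrs, "cache.hit", "true", pcommon.ValueTypeBool)
	coerceTag(attrs, "region", "us-east-1", pcommon.ValueTypeString)

	attrs.Range(func(k string, v pcommon.Value) bool {
		fmt.Println(k, "=", v.AsString())
		return true
	})
}
```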
-func NewThriftTracesUnmarshaler() pdata.TracesUnmarshaler { +func NewThriftTracesUnmarshaler() ptrace.Unmarshaler { return thriftUnmarshaler{} } diff --git a/pkg/translator/zipkin/zipkinv1/to_translator.go b/pkg/translator/zipkin/zipkinv1/to_translator.go index e5d0e017cac2..9d20c6ea79eb 100644 --- a/pkg/translator/zipkin/zipkinv1/to_translator.go +++ b/pkg/translator/zipkin/zipkinv1/to_translator.go @@ -15,13 +15,13 @@ package zipkinv1 // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin/zipkinv1" import ( - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/ptrace" internaldata "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus" ) -func toTraces(ocTraces []traceData) (pdata.Traces, error) { - td := pdata.NewTraces() +func toTraces(ocTraces []traceData) (ptrace.Traces, error) { + td := ptrace.NewTraces() for _, trace := range ocTraces { tmp := internaldata.OCToTraces(trace.Node, trace.Resource, trace.Spans) diff --git a/pkg/translator/zipkin/zipkinv2/from_translator.go b/pkg/translator/zipkin/zipkinv2/from_translator.go index 555295d6eaca..58b62d84c37a 100644 --- a/pkg/translator/zipkin/zipkinv2/from_translator.go +++ b/pkg/translator/zipkin/zipkinv2/from_translator.go @@ -23,8 +23,9 @@ import ( "time" zipkinmodel "github.com/openzipkin/zipkin-go/model" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/idutils" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/tracetranslator" @@ -45,7 +46,7 @@ type FromTranslator struct{} // FromTraces translates internal trace data into Zipkin v2 spans. // Returns a slice of Zipkin SpanModel's. 
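`FromTraces`, whose signature changes in the hunk that continues below, is the public entry point of the zipkinv2 from-translator. A minimal caller sketch (not part of this patch), reusing the ID and timestamp values from the tests later in this patch.

```go
package main

import (
	"fmt"
	"log"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/ptrace"

	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin/zipkinv2"
)

func main() {
	td := ptrace.NewTraces()
	span := td.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty().Spans().AppendEmpty()
	span.SetTraceID(pcommon.NewTraceID([16]byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
		0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10}))
	span.SetSpanID(pcommon.NewSpanID([8]byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}))
	span.SetName("client-call")
	span.SetKind(ptrace.SpanKindClient)
	span.SetStartTimestamp(1596911098294000000)
	span.SetEndTimestamp(1596911098295000000)

	// Translate the internal representation into Zipkin v2 span models.
	zspans, err := zipkinv2.FromTranslator{}.FromTraces(td)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("zipkin spans:", len(zspans))
}
```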
-func (t FromTranslator) FromTraces(td pdata.Traces) ([]*zipkinmodel.SpanModel, error) { +func (t FromTranslator) FromTraces(td ptrace.Traces) ([]*zipkinmodel.SpanModel, error) { resourceSpans := td.ResourceSpans() if resourceSpans.Len() == 0 { return nil, nil @@ -66,7 +67,7 @@ func (t FromTranslator) FromTraces(td pdata.Traces) ([]*zipkinmodel.SpanModel, e return zSpans, nil } -func resourceSpansToZipkinSpans(rs pdata.ResourceSpans, estSpanCount int) ([]*zipkinmodel.SpanModel, error) { +func resourceSpansToZipkinSpans(rs ptrace.ResourceSpans, estSpanCount int) ([]*zipkinmodel.SpanModel, error) { resource := rs.Resource() ilss := rs.ScopeSpans() @@ -93,7 +94,7 @@ func resourceSpansToZipkinSpans(rs pdata.ResourceSpans, estSpanCount int) ([]*zi return zSpans, nil } -func extractInstrumentationLibraryTags(il pdata.InstrumentationScope, zTags map[string]string) { +func extractInstrumentationLibraryTags(il pcommon.InstrumentationScope, zTags map[string]string) { if ilName := il.Name(); ilName != "" { zTags[conventions.OtelLibraryName] = ilName } @@ -103,7 +104,7 @@ func extractInstrumentationLibraryTags(il pdata.InstrumentationScope, zTags map[ } func spanToZipkinSpan( - span pdata.Span, + span ptrace.Span, localServiceName string, zTags map[string]string, ) (*zipkinmodel.SpanModel, error) { @@ -147,7 +148,7 @@ func spanToZipkinSpan( zs.Duration = time.Duration(span.EndTimestamp() - span.StartTimestamp()) } zs.Kind = spanKindToZipkinKind(span.Kind()) - if span.Kind() == pdata.SpanKindInternal { + if span.Kind() == ptrace.SpanKindInternal { tags[tracetranslator.TagSpanKind] = "internal" } @@ -170,8 +171,8 @@ func spanToZipkinSpan( return zs, nil } -func populateStatus(status pdata.SpanStatus, zs *zipkinmodel.SpanModel, tags map[string]string) { - if status.Code() == pdata.StatusCodeError { +func populateStatus(status ptrace.SpanStatus, zs *zipkinmodel.SpanModel, tags map[string]string) { + if status.Code() == ptrace.StatusCodeError { tags[tracetranslator.TagError] = "true" } else { // The error tag should only be set if Status is Error. If a boolean version @@ -183,7 +184,7 @@ func populateStatus(status pdata.SpanStatus, zs *zipkinmodel.SpanModel, tags map // Per specs, Span Status MUST be reported as a key-value pair in tags to Zipkin, unless it is UNSET. // In the latter case it MUST NOT be reported. // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/sdk_exporters/zipkin.md#status - if status.Code() == pdata.StatusCodeUnset { + if status.Code() == ptrace.StatusCodeUnset { return } @@ -194,7 +195,7 @@ func populateStatus(status pdata.SpanStatus, zs *zipkinmodel.SpanModel, tags map } } -func aggregateSpanTags(span pdata.Span, zTags map[string]string) map[string]string { +func aggregateSpanTags(span ptrace.Span, zTags map[string]string) map[string]string { tags := make(map[string]string) for key, val := range zTags { tags[key] = val @@ -206,7 +207,7 @@ func aggregateSpanTags(span pdata.Span, zTags map[string]string) map[string]stri return tags } -func spanEventsToZipkinAnnotations(events pdata.SpanEventSlice, zs *zipkinmodel.SpanModel) error { +func spanEventsToZipkinAnnotations(events ptrace.SpanEventSlice, zs *zipkinmodel.SpanModel) error { if events.Len() > 0 { zAnnos := make([]zipkinmodel.Annotation, events.Len()) for i := 0; i < events.Len(); i++ { @@ -233,7 +234,7 @@ func spanEventsToZipkinAnnotations(events pdata.SpanEventSlice, zs *zipkinmodel. 
return nil } -func spanLinksToZipkinTags(links pdata.SpanLinkSlice, zTags map[string]string) error { +func spanLinksToZipkinTags(links ptrace.SpanLinkSlice, zTags map[string]string) error { for i := 0; i < links.Len(); i++ { link := links.At(i) key := fmt.Sprintf("otlp.link.%d", i) @@ -247,9 +248,9 @@ func spanLinksToZipkinTags(links pdata.SpanLinkSlice, zTags map[string]string) e return nil } -func attributeMapToStringMap(attrMap pdata.Map) map[string]string { +func attributeMapToStringMap(attrMap pcommon.Map) map[string]string { rawMap := make(map[string]string) - attrMap.Range(func(k string, v pdata.Value) bool { + attrMap.Range(func(k string, v pcommon.Value) bool { rawMap[k] = v.AsString() return true }) @@ -265,7 +266,7 @@ func removeRedundantTags(redundantKeys map[string]bool, zTags map[string]string) } func resourceToZipkinEndpointServiceNameAndAttributeMap( - resource pdata.Resource, + resource pcommon.Resource, ) (serviceName string, zTags map[string]string) { zTags = make(map[string]string) attrs := resource.Attributes() @@ -273,7 +274,7 @@ func resourceToZipkinEndpointServiceNameAndAttributeMap( return tracetranslator.ResourceNoServiceName, zTags } - attrs.Range(func(k string, v pdata.Value) bool { + attrs.Range(func(k string, v pcommon.Value) bool { zTags[k] = v.AsString() return true }) @@ -305,15 +306,15 @@ func extractZipkinServiceName(zTags map[string]string) string { return serviceName } -func spanKindToZipkinKind(kind pdata.SpanKind) zipkinmodel.Kind { +func spanKindToZipkinKind(kind ptrace.SpanKind) zipkinmodel.Kind { switch kind { - case pdata.SpanKindClient: + case ptrace.SpanKindClient: return zipkinmodel.Client - case pdata.SpanKindServer: + case ptrace.SpanKindServer: return zipkinmodel.Server - case pdata.SpanKindProducer: + case ptrace.SpanKindProducer: return zipkinmodel.Producer - case pdata.SpanKindConsumer: + case ptrace.SpanKindConsumer: return zipkinmodel.Consumer default: return zipkinmodel.Undetermined @@ -380,11 +381,11 @@ func isIPv6Address(ipStr string) bool { return false } -func convertTraceID(t pdata.TraceID) zipkinmodel.TraceID { +func convertTraceID(t pcommon.TraceID) zipkinmodel.TraceID { h, l := idutils.TraceIDToUInt64Pair(t) return zipkinmodel.TraceID{High: h, Low: l} } -func convertSpanID(s pdata.SpanID) zipkinmodel.ID { +func convertSpanID(s pcommon.SpanID) zipkinmodel.ID { return zipkinmodel.ID(idutils.SpanIDToUInt64(s)) } diff --git a/pkg/translator/zipkin/zipkinv2/from_translator_test.go b/pkg/translator/zipkin/zipkinv2/from_translator_test.go index 3bdb70ea292d..09b4ab83157b 100644 --- a/pkg/translator/zipkin/zipkinv2/from_translator_test.go +++ b/pkg/translator/zipkin/zipkinv2/from_translator_test.go @@ -20,8 +20,9 @@ import ( zipkinmodel "github.com/openzipkin/zipkin-go/model" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/goldendataset" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/testdata" @@ -31,13 +32,13 @@ import ( func TestInternalTracesToZipkinSpans(t *testing.T) { tests := []struct { name string - td pdata.Traces + td ptrace.Traces zs []*zipkinmodel.SpanModel err error }{ { name: "empty", - td: pdata.NewTraces(), + td: ptrace.NewTraces(), err: nil, }, { @@ -66,20 +67,20 @@ func 
TestInternalTracesToZipkinSpans(t *testing.T) { }, { name: "oneSpanOk", - td: generateTraceOneSpanOneTraceID(pdata.StatusCodeOk), - zs: []*zipkinmodel.SpanModel{zipkinOneSpan(pdata.StatusCodeOk)}, + td: generateTraceOneSpanOneTraceID(ptrace.StatusCodeOk), + zs: []*zipkinmodel.SpanModel{zipkinOneSpan(ptrace.StatusCodeOk)}, err: nil, }, { name: "oneSpanError", - td: generateTraceOneSpanOneTraceID(pdata.StatusCodeError), - zs: []*zipkinmodel.SpanModel{zipkinOneSpan(pdata.StatusCodeError)}, + td: generateTraceOneSpanOneTraceID(ptrace.StatusCodeError), + zs: []*zipkinmodel.SpanModel{zipkinOneSpan(ptrace.StatusCodeError)}, err: nil, }, { name: "oneSpanUnset", - td: generateTraceOneSpanOneTraceID(pdata.StatusCodeUnset), - zs: []*zipkinmodel.SpanModel{zipkinOneSpan(pdata.StatusCodeUnset)}, + td: generateTraceOneSpanOneTraceID(ptrace.StatusCodeUnset), + zs: []*zipkinmodel.SpanModel{zipkinOneSpan(ptrace.StatusCodeUnset)}, err: nil, }, } @@ -130,7 +131,7 @@ func TestInternalTracesToZipkinSpansAndBack(t *testing.T) { } } -func findSpanByID(rs pdata.ResourceSpansSlice, spanID pdata.SpanID) *pdata.Span { +func findSpanByID(rs ptrace.ResourceSpansSlice, spanID pcommon.SpanID) *ptrace.Span { for i := 0; i < rs.Len(); i++ { instSpans := rs.At(i).ScopeSpans() for j := 0; j < instSpans.Len(); j++ { @@ -146,27 +147,27 @@ func findSpanByID(rs pdata.ResourceSpansSlice, spanID pdata.SpanID) *pdata.Span return nil } -func generateTraceOneSpanOneTraceID(status pdata.StatusCode) pdata.Traces { +func generateTraceOneSpanOneTraceID(status ptrace.StatusCode) ptrace.Traces { td := testdata.GenerateTracesOneSpan() span := td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0) - span.SetTraceID(pdata.NewTraceID([16]byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + span.SetTraceID(pcommon.NewTraceID([16]byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10})) - span.SetSpanID(pdata.NewSpanID([8]byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08})) + span.SetSpanID(pcommon.NewSpanID([8]byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08})) switch status { - case pdata.StatusCodeError: - span.Status().SetCode(pdata.StatusCodeError) + case ptrace.StatusCodeError: + span.Status().SetCode(ptrace.StatusCodeError) span.Status().SetMessage("error message") - case pdata.StatusCodeOk: - span.Status().SetCode(pdata.StatusCodeOk) + case ptrace.StatusCodeOk: + span.Status().SetCode(ptrace.StatusCodeOk) span.Status().SetMessage("") default: - span.Status().SetCode(pdata.StatusCodeUnset) + span.Status().SetCode(ptrace.StatusCodeUnset) span.Status().SetMessage("") } return td } -func zipkinOneSpan(status pdata.StatusCode) *zipkinmodel.SpanModel { +func zipkinOneSpan(status ptrace.StatusCode) *zipkinmodel.SpanModel { trueBool := true var spanErr error @@ -175,9 +176,9 @@ func zipkinOneSpan(status pdata.StatusCode) *zipkinmodel.SpanModel { } switch status { - case pdata.StatusCodeOk: + case ptrace.StatusCodeOk: spanTags[conventions.OtelStatusCode] = "STATUS_CODE_OK" - case pdata.StatusCodeError: + case ptrace.StatusCodeError: spanTags[conventions.OtelStatusCode] = "STATUS_CODE_ERROR" spanTags[conventions.OtelStatusDescription] = "error message" spanTags[tracetranslator.TagError] = "true" diff --git a/pkg/translator/zipkin/zipkinv2/json.go b/pkg/translator/zipkin/zipkinv2/json.go index c590fef3200d..243f7d7d3688 100644 --- a/pkg/translator/zipkin/zipkinv2/json.go +++ b/pkg/translator/zipkin/zipkinv2/json.go @@ -19,7 +19,7 @@ import ( zipkinmodel 
"github.com/openzipkin/zipkin-go/model" zipkinreporter "github.com/openzipkin/zipkin-go/reporter" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/ptrace" ) type jsonUnmarshaler struct { @@ -27,21 +27,21 @@ type jsonUnmarshaler struct { } // UnmarshalTraces from JSON bytes. -func (j jsonUnmarshaler) UnmarshalTraces(buf []byte) (pdata.Traces, error) { +func (j jsonUnmarshaler) UnmarshalTraces(buf []byte) (ptrace.Traces, error) { var spans []*zipkinmodel.SpanModel if err := json.Unmarshal(buf, &spans); err != nil { - return pdata.Traces{}, err + return ptrace.Traces{}, err } return j.toTranslator.ToTraces(spans) } // NewJSONTracesUnmarshaler returns an unmarshaler for JSON bytes. -func NewJSONTracesUnmarshaler(parseStringTags bool) pdata.TracesUnmarshaler { +func NewJSONTracesUnmarshaler(parseStringTags bool) ptrace.Unmarshaler { return jsonUnmarshaler{toTranslator: ToTranslator{ParseStringTags: parseStringTags}} } // NewJSONTracesMarshaler returns a marshaler to JSON bytes. -func NewJSONTracesMarshaler() pdata.TracesMarshaler { +func NewJSONTracesMarshaler() ptrace.Marshaler { return marshaler{ serializer: zipkinreporter.JSONSerializer{}, } diff --git a/pkg/translator/zipkin/zipkinv2/json_test.go b/pkg/translator/zipkin/zipkinv2/json_test.go index 9589c3033f34..204faab20172 100644 --- a/pkg/translator/zipkin/zipkinv2/json_test.go +++ b/pkg/translator/zipkin/zipkinv2/json_test.go @@ -20,7 +20,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/ptrace" ) func TestJSONUnmarshaler_UnmarshalTraces(t *testing.T) { @@ -46,7 +46,7 @@ func TestJSONEncoder_EncodeTraces(t *testing.T) { } func TestJSONEncoder_EncodeTracesError(t *testing.T) { - invalidTD := pdata.NewTraces() + invalidTD := ptrace.NewTraces() // Add one span with empty trace ID. invalidTD.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty().Spans().AppendEmpty() marshaler := NewJSONTracesMarshaler() diff --git a/pkg/translator/zipkin/zipkinv2/marshaler.go b/pkg/translator/zipkin/zipkinv2/marshaler.go index 1c929e96d775..293d78d6b7d4 100644 --- a/pkg/translator/zipkin/zipkinv2/marshaler.go +++ b/pkg/translator/zipkin/zipkinv2/marshaler.go @@ -16,7 +16,7 @@ package zipkinv2 // import "github.com/open-telemetry/opentelemetry-collector-co import ( zipkinreporter "github.com/openzipkin/zipkin-go/reporter" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/ptrace" ) type marshaler struct { @@ -25,7 +25,7 @@ type marshaler struct { } // MarshalTraces to JSON bytes. 
-func (j marshaler) MarshalTraces(td pdata.Traces) ([]byte, error) { +func (j marshaler) MarshalTraces(td ptrace.Traces) ([]byte, error) { spans, err := j.fromTranslator.FromTraces(td) if err != nil { return nil, err diff --git a/pkg/translator/zipkin/zipkinv2/protobuf.go b/pkg/translator/zipkin/zipkinv2/protobuf.go index 2f9d67449dc9..f5d5c82de212 100644 --- a/pkg/translator/zipkin/zipkinv2/protobuf.go +++ b/pkg/translator/zipkin/zipkinv2/protobuf.go @@ -16,7 +16,7 @@ package zipkinv2 // import "github.com/open-telemetry/opentelemetry-collector-co import ( "github.com/openzipkin/zipkin-go/proto/zipkin_proto3" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/ptrace" ) type protobufUnmarshaler struct { @@ -28,24 +28,24 @@ type protobufUnmarshaler struct { } // UnmarshalTraces from protobuf bytes. -func (p protobufUnmarshaler) UnmarshalTraces(buf []byte) (pdata.Traces, error) { +func (p protobufUnmarshaler) UnmarshalTraces(buf []byte) (ptrace.Traces, error) { spans, err := zipkin_proto3.ParseSpans(buf, p.debugWasSet) if err != nil { - return pdata.Traces{}, err + return ptrace.Traces{}, err } return p.toTranslator.ToTraces(spans) } -// NewProtobufTracesUnmarshaler returns an pdata.TracesUnmarshaler of protobuf bytes. -func NewProtobufTracesUnmarshaler(debugWasSet, parseStringTags bool) pdata.TracesUnmarshaler { +// NewProtobufTracesUnmarshaler returns an ptrace.Unmarshaler of protobuf bytes. +func NewProtobufTracesUnmarshaler(debugWasSet, parseStringTags bool) ptrace.Unmarshaler { return protobufUnmarshaler{ debugWasSet: debugWasSet, toTranslator: ToTranslator{ParseStringTags: parseStringTags}, } } -// NewProtobufTracesMarshaler returns a new pdata.TracesMarshaler to protobuf bytes. -func NewProtobufTracesMarshaler() pdata.TracesMarshaler { +// NewProtobufTracesMarshaler returns a new ptrace.Marshaler to protobuf bytes. +func NewProtobufTracesMarshaler() ptrace.Marshaler { return marshaler{ serializer: zipkin_proto3.SpanSerializer{}, } diff --git a/pkg/translator/zipkin/zipkinv2/protobuf_test.go b/pkg/translator/zipkin/zipkinv2/protobuf_test.go index 4b6420189678..4ea83228252f 100644 --- a/pkg/translator/zipkin/zipkinv2/protobuf_test.go +++ b/pkg/translator/zipkin/zipkinv2/protobuf_test.go @@ -19,7 +19,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/ptrace" ) func TestProtobufMarshalUnmarshal(t *testing.T) { @@ -39,7 +39,7 @@ func TestProtobuf_UnmarshalTracesError(t *testing.T) { } func TestProtobuf_MarshalTracesError(t *testing.T) { - invalidTD := pdata.NewTraces() + invalidTD := ptrace.NewTraces() // Add one span with empty trace ID. 
invalidTD.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty().Spans().AppendEmpty() marshaler := NewProtobufTracesMarshaler() diff --git a/pkg/translator/zipkin/zipkinv2/to_translator.go b/pkg/translator/zipkin/zipkinv2/to_translator.go index 4f7cba49b2a3..96ff270ad0fc 100644 --- a/pkg/translator/zipkin/zipkinv2/to_translator.go +++ b/pkg/translator/zipkin/zipkinv2/to_translator.go @@ -26,8 +26,9 @@ import ( "time" zipkinmodel "github.com/openzipkin/zipkin-go/model" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/idutils" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/occonventions" @@ -41,9 +42,9 @@ type ToTranslator struct { ParseStringTags bool } -// ToTraces translates Zipkin v2 spans into pdata.Traces. -func (t ToTranslator) ToTraces(zipkinSpans []*zipkinmodel.SpanModel) (pdata.Traces, error) { - traceData := pdata.NewTraces() +// ToTraces translates Zipkin v2 spans into ptrace.Traces. +func (t ToTranslator) ToTraces(zipkinSpans []*zipkinmodel.SpanModel) (ptrace.Traces, error) { + traceData := ptrace.NewTraces() if len(zipkinSpans) == 0 { return traceData, nil } @@ -54,9 +55,9 @@ func (t ToTranslator) ToTraces(zipkinSpans []*zipkinmodel.SpanModel) (pdata.Trac prevServiceName := "" prevInstrLibName := "" ilsIsNew := true - var curRscSpans pdata.ResourceSpans - var curILSpans pdata.ScopeSpans - var curSpans pdata.SpanSlice + var curRscSpans ptrace.ResourceSpans + var curILSpans ptrace.ScopeSpans + var curSpans ptrace.SpanSlice for _, zspan := range zipkinSpans { if zspan == nil { continue @@ -122,11 +123,11 @@ func (b byOTLPTypes) Swap(i, j int) { b[i], b[j] = b[j], b[i] } -func zSpanToInternal(zspan *zipkinmodel.SpanModel, tags map[string]string, dest pdata.Span, parseStringTags bool) error { +func zSpanToInternal(zspan *zipkinmodel.SpanModel, tags map[string]string, dest ptrace.Span, parseStringTags bool) error { dest.SetTraceID(idutils.UInt64ToTraceID(zspan.TraceID.High, zspan.TraceID.Low)) dest.SetSpanID(idutils.UInt64ToSpanID(uint64(zspan.ID))) if value, ok := tags[tracetranslator.TagW3CTraceState]; ok { - dest.SetTraceState(pdata.TraceState(value)) + dest.SetTraceState(ptrace.TraceState(value)) delete(tags, tracetranslator.TagW3CTraceState) } parentID := zspan.ParentID @@ -155,9 +156,9 @@ func zSpanToInternal(zspan *zipkinmodel.SpanModel, tags map[string]string, dest return err } -func populateSpanStatus(tags map[string]string, status pdata.SpanStatus) { +func populateSpanStatus(tags map[string]string, status ptrace.SpanStatus) { if value, ok := tags[conventions.OtelStatusCode]; ok { - status.SetCode(pdata.StatusCode(statusCodeValue[value])) + status.SetCode(ptrace.StatusCode(statusCodeValue[value])) delete(tags, conventions.OtelStatusCode) if value, ok := tags[conventions.OtelStatusDescription]; ok { status.SetMessage(value) @@ -167,34 +168,34 @@ func populateSpanStatus(tags map[string]string, status pdata.SpanStatus) { if val, ok := tags[tracetranslator.TagError]; ok { if val == "true" { - status.SetCode(pdata.StatusCodeError) + status.SetCode(ptrace.StatusCodeError) delete(tags, tracetranslator.TagError) } } } -func zipkinKindToSpanKind(kind zipkinmodel.Kind, tags map[string]string) pdata.SpanKind { +func zipkinKindToSpanKind(kind zipkinmodel.Kind, tags map[string]string) 
ptrace.SpanKind { switch kind { case zipkinmodel.Client: - return pdata.SpanKindClient + return ptrace.SpanKindClient case zipkinmodel.Server: - return pdata.SpanKindServer + return ptrace.SpanKindServer case zipkinmodel.Producer: - return pdata.SpanKindProducer + return ptrace.SpanKindProducer case zipkinmodel.Consumer: - return pdata.SpanKindConsumer + return ptrace.SpanKindConsumer default: if value, ok := tags[tracetranslator.TagSpanKind]; ok { delete(tags, tracetranslator.TagSpanKind) if value == "internal" { - return pdata.SpanKindInternal + return ptrace.SpanKindInternal } } - return pdata.SpanKindUnspecified + return ptrace.SpanKindUnspecified } } -func zTagsToSpanLinks(tags map[string]string, dest pdata.SpanLinkSlice) error { +func zTagsToSpanLinks(tags map[string]string, dest ptrace.SpanLinkSlice) error { for i := 0; i < 128; i++ { key := fmt.Sprintf("otlp.link.%d", i) val, ok := tags[key] @@ -216,7 +217,7 @@ func zTagsToSpanLinks(tags map[string]string, dest pdata.SpanLinkSlice) error { if errTrace != nil { return errTrace } - link.SetTraceID(pdata.NewTraceID(rawTrace)) + link.SetTraceID(pcommon.NewTraceID(rawTrace)) // Convert span id. rawSpan := [8]byte{} @@ -224,9 +225,9 @@ func zTagsToSpanLinks(tags map[string]string, dest pdata.SpanLinkSlice) error { if errSpan != nil { return errSpan } - link.SetSpanID(pdata.NewSpanID(rawSpan)) + link.SetSpanID(pcommon.NewSpanID(rawSpan)) - link.SetTraceState(pdata.TraceState(parts[2])) + link.SetTraceState(ptrace.TraceState(parts[2])) var jsonStr string if partCnt == 5 { @@ -252,11 +253,11 @@ func zTagsToSpanLinks(tags map[string]string, dest pdata.SpanLinkSlice) error { return nil } -func populateSpanEvents(zspan *zipkinmodel.SpanModel, events pdata.SpanEventSlice) error { +func populateSpanEvents(zspan *zipkinmodel.SpanModel, events ptrace.SpanEventSlice) error { events.EnsureCapacity(len(zspan.Annotations)) for _, anno := range zspan.Annotations { event := events.AppendEmpty() - event.SetTimestamp(pdata.NewTimestampFromTime(anno.Timestamp)) + event.SetTimestamp(pcommon.NewTimestampFromTime(anno.Timestamp)) parts := strings.Split(anno.Value, "|") partCnt := len(parts) @@ -289,7 +290,7 @@ func populateSpanEvents(zspan *zipkinmodel.SpanModel, events pdata.SpanEventSlic return nil } -func jsonMapToAttributeMap(attrs map[string]interface{}, dest pdata.Map) error { +func jsonMapToAttributeMap(attrs map[string]interface{}, dest pcommon.Map) error { for key, val := range attrs { if s, ok := val.(string); ok { dest.InsertString(key, s) @@ -306,7 +307,7 @@ func jsonMapToAttributeMap(attrs map[string]interface{}, dest pdata.Map) error { return nil } -func zTagsToInternalAttrs(zspan *zipkinmodel.SpanModel, tags map[string]string, dest pdata.Map, parseStringTags bool) error { +func zTagsToInternalAttrs(zspan *zipkinmodel.SpanModel, tags map[string]string, dest pcommon.Map, parseStringTags bool) error { parseErr := tagsToAttributeMap(tags, dest, parseStringTags) if zspan.LocalEndpoint != nil { if zspan.LocalEndpoint.IPv4 != nil { @@ -336,7 +337,7 @@ func zTagsToInternalAttrs(zspan *zipkinmodel.SpanModel, tags map[string]string, return parseErr } -func tagsToAttributeMap(tags map[string]string, dest pdata.Map, parseStringTags bool) error { +func tagsToAttributeMap(tags map[string]string, dest pcommon.Map, parseStringTags bool) error { var parseErr error for key, val := range tags { if _, ok := nonSpanAttributes[key]; ok { @@ -345,13 +346,13 @@ func tagsToAttributeMap(tags map[string]string, dest pdata.Map, parseStringTags if parseStringTags { switch 
zipkin.DetermineValueType(val) { - case pdata.ValueTypeInt: + case pcommon.ValueTypeInt: iValue, _ := strconv.ParseInt(val, 10, 64) dest.UpsertInt(key, iValue) - case pdata.ValueTypeDouble: + case pcommon.ValueTypeDouble: fValue, _ := strconv.ParseFloat(val, 64) dest.UpsertDouble(key, fValue) - case pdata.ValueTypeBool: + case pcommon.ValueTypeBool: bValue, _ := strconv.ParseBool(val) dest.UpsertBool(key, bValue) default: @@ -364,7 +365,7 @@ func tagsToAttributeMap(tags map[string]string, dest pdata.Map, parseStringTags return parseErr } -func populateResourceFromZipkinSpan(tags map[string]string, localServiceName string, resource pdata.Resource) { +func populateResourceFromZipkinSpan(tags map[string]string, localServiceName string, resource pcommon.Resource) { if localServiceName == tracetranslator.ResourceNoServiceName { return } @@ -393,7 +394,7 @@ func populateResourceFromZipkinSpan(tags map[string]string, localServiceName str } } -func populateILFromZipkinSpan(tags map[string]string, instrLibName string, library pdata.InstrumentationScope) { +func populateILFromZipkinSpan(tags map[string]string, instrLibName string, library pcommon.InstrumentationScope) { if instrLibName == "" { return } @@ -429,27 +430,27 @@ func extractInstrumentationLibrary(zspan *zipkinmodel.SpanModel) string { return zspan.Tags[conventions.OtelLibraryName] } -func setTimestampsV2(zspan *zipkinmodel.SpanModel, dest pdata.Span, destAttrs pdata.Map) { +func setTimestampsV2(zspan *zipkinmodel.SpanModel, dest ptrace.Span, destAttrs pcommon.Map) { // zipkin allows timestamp to be unset, but otel span expects startTimestamp to have a value. // unset gets converted to zero on the zspan object during json deserialization because // time.Time (the type of Timestamp field) cannot be nil. If timestamp is zero, the // conversion from this internal format back to zipkin format in zipkin exporter fails. // Instead, set to *unix* time zero, and convert back in traces_to_zipkinv2.go if zspan.Timestamp.IsZero() { - unixTimeZero := pdata.NewTimestampFromTime(time.Unix(0, 0)) - zeroPlusDuration := pdata.NewTimestampFromTime(time.Unix(0, 0).Add(zspan.Duration)) + unixTimeZero := pcommon.NewTimestampFromTime(time.Unix(0, 0)) + zeroPlusDuration := pcommon.NewTimestampFromTime(time.Unix(0, 0).Add(zspan.Duration)) dest.SetStartTimestamp(unixTimeZero) dest.SetEndTimestamp(zeroPlusDuration) destAttrs.InsertBool(zipkin.StartTimeAbsent, true) } else { - dest.SetStartTimestamp(pdata.NewTimestampFromTime(zspan.Timestamp)) - dest.SetEndTimestamp(pdata.NewTimestampFromTime(zspan.Timestamp.Add(zspan.Duration))) + dest.SetStartTimestamp(pcommon.NewTimestampFromTime(zspan.Timestamp)) + dest.SetEndTimestamp(pcommon.NewTimestampFromTime(zspan.Timestamp.Add(zspan.Duration))) } } // unmarshalJSON inflates trace id from hex string, possibly enclosed in quotes. -// TODO: Find a way to avoid this duplicate code. Consider to expose this in model/pdata. +// TODO: Find a way to avoid this duplicate code. Consider to expose this in pdata. func unmarshalJSON(dst []byte, src []byte) error { if l := len(src); l >= 2 && src[0] == '"' && src[l-1] == '"' { src = src[1 : l-1] @@ -470,7 +471,7 @@ func unmarshalJSON(dst []byte, src []byte) error { return nil } -// TODO: Find a way to avoid this duplicate code. Consider to expose this in model/pdata. +// TODO: Find a way to avoid this duplicate code. Consider to expose this in pdata. 
var statusCodeValue = map[string]int32{ "STATUS_CODE_UNSET": 0, "STATUS_CODE_OK": 1, diff --git a/pkg/translator/zipkin/zipkinv2/to_translator_test.go b/pkg/translator/zipkin/zipkinv2/to_translator_test.go index 3297388a9f3e..00bc303b4f14 100644 --- a/pkg/translator/zipkin/zipkinv2/to_translator_test.go +++ b/pkg/translator/zipkin/zipkinv2/to_translator_test.go @@ -20,8 +20,9 @@ import ( zipkinmodel "github.com/openzipkin/zipkin-go/model" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin/internal/zipkin" ) @@ -30,13 +31,13 @@ func TestZipkinSpansToInternalTraces(t *testing.T) { tests := []struct { name string zs []*zipkinmodel.SpanModel - td pdata.Traces + td ptrace.Traces err error }{ { name: "empty", zs: make([]*zipkinmodel.SpanModel, 0), - td: pdata.NewTraces(), + td: ptrace.NewTraces(), err: nil, }, { @@ -75,8 +76,8 @@ func generateSpanNoEndpoints() []*zipkinmodel.SpanModel { spans[0] = &zipkinmodel.SpanModel{ SpanContext: zipkinmodel.SpanContext{ TraceID: convertTraceID( - pdata.NewTraceID([16]byte{0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF, 0x80})), - ID: convertSpanID(pdata.NewSpanID([8]byte{0xAF, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9, 0xA8})), + pcommon.NewTraceID([16]byte{0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF, 0x80})), + ID: convertSpanID(pcommon.NewSpanID([8]byte{0xAF, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9, 0xA8})), }, Name: "MinimalData", Kind: zipkinmodel.Client, @@ -106,20 +107,20 @@ func generateSpanErrorTags() []*zipkinmodel.SpanModel { return spans } -func generateTraceSingleSpanNoResourceOrInstrLibrary() pdata.Traces { - td := pdata.NewTraces() +func generateTraceSingleSpanNoResourceOrInstrLibrary() ptrace.Traces { + td := ptrace.NewTraces() span := td.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty().Spans().AppendEmpty() span.SetTraceID( - pdata.NewTraceID([16]byte{0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF, 0x80})) - span.SetSpanID(pdata.NewSpanID([8]byte{0xAF, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9, 0xA8})) + pcommon.NewTraceID([16]byte{0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF, 0x80})) + span.SetSpanID(pcommon.NewSpanID([8]byte{0xAF, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9, 0xA8})) span.SetName("MinimalData") - span.SetKind(pdata.SpanKindClient) + span.SetKind(ptrace.SpanKindClient) span.SetStartTimestamp(1596911098294000000) span.SetEndTimestamp(1596911098295000000) return td } -func generateTraceSingleSpanMinmalResource() pdata.Traces { +func generateTraceSingleSpanMinmalResource() ptrace.Traces { td := generateTraceSingleSpanNoResourceOrInstrLibrary() rs := td.ResourceSpans().At(0) rsc := rs.Resource() @@ -127,17 +128,17 @@ func generateTraceSingleSpanMinmalResource() pdata.Traces { return td } -func generateTraceSingleSpanErrorStatus() pdata.Traces { - td := pdata.NewTraces() +func generateTraceSingleSpanErrorStatus() ptrace.Traces { + td := ptrace.NewTraces() span := td.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty().Spans().AppendEmpty() span.SetTraceID( - pdata.NewTraceID([16]byte{0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF, 0x80})) - 
span.SetSpanID(pdata.NewSpanID([8]byte{0xAF, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9, 0xA8})) + pcommon.NewTraceID([16]byte{0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF, 0x80})) + span.SetSpanID(pcommon.NewSpanID([8]byte{0xAF, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9, 0xA8})) span.SetName("MinimalData") - span.SetKind(pdata.SpanKindClient) + span.SetKind(ptrace.SpanKindClient) span.SetStartTimestamp(1596911098294000000) span.SetEndTimestamp(1596911098295000000) - span.Status().SetCode(pdata.StatusCodeError) + span.Status().SetCode(ptrace.StatusCodeError) return td } @@ -147,8 +148,8 @@ func TestV2SpanWithoutTimestampGetsTag(t *testing.T) { spans[0] = &zipkinmodel.SpanModel{ SpanContext: zipkinmodel.SpanContext{ TraceID: convertTraceID( - pdata.NewTraceID([16]byte{0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF, 0x80})), - ID: convertSpanID(pdata.NewSpanID([8]byte{0xAF, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9, 0xA8})), + pcommon.NewTraceID([16]byte{0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF, 0x80})), + ID: convertSpanID(pcommon.NewSpanID([8]byte{0xAF, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9, 0xA8})), }, Name: "NoTimestamps", Kind: zipkinmodel.Client, diff --git a/pkg/winperfcounters/go.mod b/pkg/winperfcounters/go.mod index 2b0d2ba9e16c..406d21cef594 100644 --- a/pkg/winperfcounters/go.mod +++ b/pkg/winperfcounters/go.mod @@ -19,3 +19,5 @@ require ( ) replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/scrapertest => ../../internal/scrapertest + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/processor/attributesprocessor/attributes_log.go b/processor/attributesprocessor/attributes_log.go index de84c7507aff..5d3c95fce2dd 100644 --- a/processor/attributesprocessor/attributes_log.go +++ b/processor/attributesprocessor/attributes_log.go @@ -17,7 +17,8 @@ package attributesprocessor // import "github.com/open-telemetry/opentelemetry-c import ( "context" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/attraction" @@ -43,7 +44,7 @@ func newLogAttributesProcessor(logger *zap.Logger, attrProc *attraction.AttrProc } } -func (a *logAttributesProcessor) processLogs(ctx context.Context, ld pdata.Logs) (pdata.Logs, error) { +func (a *logAttributesProcessor) processLogs(ctx context.Context, ld plog.Logs) (plog.Logs, error) { rls := ld.ResourceLogs() for i := 0; i < rls.Len(); i++ { rs := rls.At(i) @@ -72,7 +73,7 @@ func (a *logAttributesProcessor) processLogs(ctx context.Context, ld pdata.Logs) // The logic determining if a log should be processed is set // in the attribute configuration with the include and exclude settings. // Include properties are checked before exclude settings are checked. -func (a *logAttributesProcessor) skipLog(lr pdata.LogRecord, resource pdata.Resource, library pdata.InstrumentationScope) bool { +func (a *logAttributesProcessor) skipLog(lr plog.LogRecord, resource pcommon.Resource, library pcommon.InstrumentationScope) bool { if a.include != nil { // A false returned in this case means the log should not be processed. 
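The log attributes processor above now operates on `plog.Logs` with `pcommon.Resource` and `pcommon.InstrumentationScope`. A minimal sketch (not part of this patch) of constructing such a payload, mirroring the `generateLogData` test helper that follows; `SetStringVal` and `LogRecordCount` are assumed from the v0.48-era API.

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/plog"
)

func main() {
	ld := plog.NewLogs()
	rl := ld.ResourceLogs().AppendEmpty()
	rl.Resource().Attributes().InsertString("name", "example-resource")

	lr := rl.ScopeLogs().AppendEmpty().LogRecords().AppendEmpty()
	lr.Body().SetStringVal("something happened")
	pcommon.NewMapFromRaw(map[string]interface{}{
		"http.method": "GET",
		"http.flavor": "1.1",
	}).CopyTo(lr.Attributes())

	fmt.Println("log records:", ld.LogRecordCount())
}
```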
if include := a.include.MatchLogRecord(lr, resource, library); !include { diff --git a/processor/attributesprocessor/attributes_log_test.go b/processor/attributesprocessor/attributes_log_test.go index 039a37c8198c..f00cafbadfc2 100644 --- a/processor/attributesprocessor/attributes_log_test.go +++ b/processor/attributesprocessor/attributes_log_test.go @@ -23,7 +23,8 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/consumer/consumertest" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/attraction" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/processor/filterconfig" @@ -49,18 +50,18 @@ func runIndividualLogTestCase(t *testing.T, tt logTestCase, tp component.LogsPro }) } -func generateLogData(resourceName string, attrs map[string]interface{}) pdata.Logs { - td := pdata.NewLogs() +func generateLogData(resourceName string, attrs map[string]interface{}) plog.Logs { + td := plog.NewLogs() res := td.ResourceLogs().AppendEmpty() res.Resource().Attributes().InsertString("name", resourceName) sl := res.ScopeLogs().AppendEmpty() lr := sl.LogRecords().AppendEmpty() - pdata.NewMapFromRaw(attrs).CopyTo(lr.Attributes()) + pcommon.NewMapFromRaw(attrs).CopyTo(lr.Attributes()) lr.Attributes().Sort() return td } -func sortLogAttributes(ld pdata.Logs) { +func sortLogAttributes(ld plog.Logs) { rss := ld.ResourceLogs() for i := 0; i < rss.Len(); i++ { rs := rss.At(i) @@ -80,14 +81,14 @@ func sortLogAttributes(ld pdata.Logs) { func TestLogProcessor_NilEmptyData(t *testing.T) { type nilEmptyTestCase struct { name string - input pdata.Logs - output pdata.Logs + input plog.Logs + output plog.Logs } testCases := []nilEmptyTestCase{ { name: "empty", - input: pdata.NewLogs(), - output: pdata.NewLogs(), + input: plog.NewLogs(), + output: plog.NewLogs(), }, { name: "one-empty-resource-logs", diff --git a/processor/attributesprocessor/attributes_metric.go b/processor/attributesprocessor/attributes_metric.go index 55b02855179f..28a06845ac26 100644 --- a/processor/attributesprocessor/attributes_metric.go +++ b/processor/attributesprocessor/attributes_metric.go @@ -17,7 +17,7 @@ package attributesprocessor // import "github.com/open-telemetry/opentelemetry-c import ( "context" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/attraction" @@ -43,7 +43,7 @@ func newMetricAttributesProcessor(logger *zap.Logger, attrProc *attraction.AttrP } } -func (a *metricAttributesProcessor) processMetrics(ctx context.Context, md pdata.Metrics) (pdata.Metrics, error) { +func (a *metricAttributesProcessor) processMetrics(ctx context.Context, md pmetric.Metrics) (pmetric.Metrics, error) { rms := md.ResourceMetrics() for i := 0; i < rms.Len(); i++ { rs := rms.At(i) @@ -66,32 +66,32 @@ func (a *metricAttributesProcessor) processMetrics(ctx context.Context, md pdata // Attributes are provided for each log and trace, but not at the metric level // Need to process attributes for every data point within a metric. 
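As the comment above notes, metric attributes live on individual data points, so the processor rewritten in the hunk below has to switch on `pmetric.MetricDataType` to reach them. A standalone sketch (not part of this patch) of the same pattern; `forEachDataPointAttrs` is a hypothetical helper.

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"
)

// forEachDataPointAttrs applies fn to the attributes of every data point in m.
// The per-type cases are unavoidable because the data point slices share no
// common interface.
func forEachDataPointAttrs(m pmetric.Metric, fn func(pcommon.Map)) {
	switch m.DataType() {
	case pmetric.MetricDataTypeGauge:
		dps := m.Gauge().DataPoints()
		for i := 0; i < dps.Len(); i++ {
			fn(dps.At(i).Attributes())
		}
	case pmetric.MetricDataTypeSum:
		dps := m.Sum().DataPoints()
		for i := 0; i < dps.Len(); i++ {
			fn(dps.At(i).Attributes())
		}
	case pmetric.MetricDataTypeHistogram:
		dps := m.Histogram().DataPoints()
		for i := 0; i < dps.Len(); i++ {
			fn(dps.At(i).Attributes())
		}
	case pmetric.MetricDataTypeExponentialHistogram:
		dps := m.ExponentialHistogram().DataPoints()
		for i := 0; i < dps.Len(); i++ {
			fn(dps.At(i).Attributes())
		}
	case pmetric.MetricDataTypeSummary:
		dps := m.Summary().DataPoints()
		for i := 0; i < dps.Len(); i++ {
			fn(dps.At(i).Attributes())
		}
	}
}

func main() {
	md := pmetric.NewMetrics()
	m := md.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics().AppendEmpty()
	m.SetName("requests")
	m.SetDataType(pmetric.MetricDataTypeSum)
	m.Sum().DataPoints().AppendEmpty().Attributes().InsertString("host", "h1")

	forEachDataPointAttrs(m, func(attrs pcommon.Map) {
		attrs.InsertString("deployment.environment", "staging")
	})
	fmt.Println("attributes on first point:", m.Sum().DataPoints().At(0).Attributes().Len())
}
```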
-func (a *metricAttributesProcessor) processMetricAttributes(ctx context.Context, m pdata.Metric) { +func (a *metricAttributesProcessor) processMetricAttributes(ctx context.Context, m pmetric.Metric) { // This is a lot of repeated code, but since there is no single parent superclass // between metric data types, we can't use polymorphism. switch m.DataType() { - case pdata.MetricDataTypeGauge: + case pmetric.MetricDataTypeGauge: dps := m.Gauge().DataPoints() for i := 0; i < dps.Len(); i++ { a.attrProc.Process(ctx, a.logger, dps.At(i).Attributes()) } - case pdata.MetricDataTypeSum: + case pmetric.MetricDataTypeSum: dps := m.Sum().DataPoints() for i := 0; i < dps.Len(); i++ { a.attrProc.Process(ctx, a.logger, dps.At(i).Attributes()) } - case pdata.MetricDataTypeHistogram: + case pmetric.MetricDataTypeHistogram: dps := m.Histogram().DataPoints() for i := 0; i < dps.Len(); i++ { a.attrProc.Process(ctx, a.logger, dps.At(i).Attributes()) } - case pdata.MetricDataTypeExponentialHistogram: + case pmetric.MetricDataTypeExponentialHistogram: dps := m.ExponentialHistogram().DataPoints() for i := 0; i < dps.Len(); i++ { a.attrProc.Process(ctx, a.logger, dps.At(i).Attributes()) } - case pdata.MetricDataTypeSummary: + case pmetric.MetricDataTypeSummary: dps := m.Summary().DataPoints() for i := 0; i < dps.Len(); i++ { a.attrProc.Process(ctx, a.logger, dps.At(i).Attributes()) diff --git a/processor/attributesprocessor/attributes_metric_test.go b/processor/attributesprocessor/attributes_metric_test.go index b28ad9e8e655..f9c805743327 100644 --- a/processor/attributesprocessor/attributes_metric_test.go +++ b/processor/attributesprocessor/attributes_metric_test.go @@ -23,7 +23,8 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/consumer/consumertest" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/attraction" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/processor/filterconfig" @@ -49,42 +50,42 @@ func runIndividualMetricTestCase(t *testing.T, mt metricTestCase, mp component.M }) } -func generateMetricData(resourceName string, attrs map[string]interface{}) pdata.Metrics { - md := pdata.NewMetrics() +func generateMetricData(resourceName string, attrs map[string]interface{}) pmetric.Metrics { + md := pmetric.NewMetrics() res := md.ResourceMetrics().AppendEmpty() res.Resource().Attributes().InsertString("name", resourceName) sl := res.ScopeMetrics().AppendEmpty() m := sl.Metrics().AppendEmpty() switch m.DataType() { - case pdata.MetricDataTypeGauge: + case pmetric.MetricDataTypeGauge: dps := m.Gauge().DataPoints() for i := 0; i < dps.Len(); i++ { - pdata.NewMapFromRaw(attrs).CopyTo(dps.At(i).Attributes()) + pcommon.NewMapFromRaw(attrs).CopyTo(dps.At(i).Attributes()) dps.At(i).Attributes().Sort() } - case pdata.MetricDataTypeSum: + case pmetric.MetricDataTypeSum: dps := m.Sum().DataPoints() for i := 0; i < dps.Len(); i++ { - pdata.NewMapFromRaw(attrs).CopyTo(dps.At(i).Attributes()) + pcommon.NewMapFromRaw(attrs).CopyTo(dps.At(i).Attributes()) dps.At(i).Attributes().Sort() } - case pdata.MetricDataTypeHistogram: + case pmetric.MetricDataTypeHistogram: dps := m.Histogram().DataPoints() for i := 0; i < dps.Len(); i++ { - pdata.NewMapFromRaw(attrs).CopyTo(dps.At(i).Attributes()) + 
pcommon.NewMapFromRaw(attrs).CopyTo(dps.At(i).Attributes()) dps.At(i).Attributes().Sort() } - case pdata.MetricDataTypeExponentialHistogram: + case pmetric.MetricDataTypeExponentialHistogram: dps := m.ExponentialHistogram().DataPoints() for i := 0; i < dps.Len(); i++ { - pdata.NewMapFromRaw(attrs).CopyTo(dps.At(i).Attributes()) + pcommon.NewMapFromRaw(attrs).CopyTo(dps.At(i).Attributes()) dps.At(i).Attributes().Sort() } - case pdata.MetricDataTypeSummary: + case pmetric.MetricDataTypeSummary: dps := m.Summary().DataPoints() for i := 0; i < dps.Len(); i++ { - pdata.NewMapFromRaw(attrs).CopyTo(dps.At(i).Attributes()) + pcommon.NewMapFromRaw(attrs).CopyTo(dps.At(i).Attributes()) dps.At(i).Attributes().Sort() } } @@ -92,7 +93,7 @@ func generateMetricData(resourceName string, attrs map[string]interface{}) pdata return md } -func sortMetricAttributes(md pdata.Metrics) { +func sortMetricAttributes(md pmetric.Metrics) { rms := md.ResourceMetrics() for i := 0; i < rms.Len(); i++ { rs := rms.At(i) @@ -104,27 +105,27 @@ func sortMetricAttributes(md pdata.Metrics) { m := metrics.At(k) switch m.DataType() { - case pdata.MetricDataTypeGauge: + case pmetric.MetricDataTypeGauge: dps := m.Gauge().DataPoints() for l := 0; l < dps.Len(); l++ { dps.At(l).Attributes().Sort() } - case pdata.MetricDataTypeSum: + case pmetric.MetricDataTypeSum: dps := m.Sum().DataPoints() for l := 0; l < dps.Len(); l++ { dps.At(l).Attributes().Sort() } - case pdata.MetricDataTypeHistogram: + case pmetric.MetricDataTypeHistogram: dps := m.Histogram().DataPoints() for l := 0; l < dps.Len(); l++ { dps.At(l).Attributes().Sort() } - case pdata.MetricDataTypeExponentialHistogram: + case pmetric.MetricDataTypeExponentialHistogram: dps := m.ExponentialHistogram().DataPoints() for l := 0; l < dps.Len(); l++ { dps.At(l).Attributes().Sort() } - case pdata.MetricDataTypeSummary: + case pmetric.MetricDataTypeSummary: dps := m.Summary().DataPoints() for l := 0; l < dps.Len(); l++ { dps.At(l).Attributes().Sort() @@ -139,15 +140,15 @@ func sortMetricAttributes(md pdata.Metrics) { func TestMetricProcessor_NilEmptyData(t *testing.T) { type nilEmptyMetricTestCase struct { name string - input pdata.Metrics - output pdata.Metrics + input pmetric.Metrics + output pmetric.Metrics } // TODO: Add test for "nil" Metric/Attributes. This needs support from data slices to allow to construct that. 
metricTestCases := []nilEmptyMetricTestCase{ { name: "empty", - input: pdata.NewMetrics(), - output: pdata.NewMetrics(), + input: pmetric.NewMetrics(), + output: pmetric.NewMetrics(), }, { name: "one-empty-resource-metrics", diff --git a/processor/attributesprocessor/attributes_trace.go b/processor/attributesprocessor/attributes_trace.go index 43b479f310dc..1b27785985fb 100644 --- a/processor/attributesprocessor/attributes_trace.go +++ b/processor/attributesprocessor/attributes_trace.go @@ -17,7 +17,7 @@ package attributesprocessor // import "github.com/open-telemetry/opentelemetry-c import ( "context" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/attraction" @@ -43,7 +43,7 @@ func newSpanAttributesProcessor(logger *zap.Logger, attrProc *attraction.AttrPro } } -func (a *spanAttributesProcessor) processTraces(ctx context.Context, td pdata.Traces) (pdata.Traces, error) { +func (a *spanAttributesProcessor) processTraces(ctx context.Context, td ptrace.Traces) (ptrace.Traces, error) { rss := td.ResourceSpans() for i := 0; i < rss.Len(); i++ { rs := rss.At(i) diff --git a/processor/attributesprocessor/attributes_trace_test.go b/processor/attributesprocessor/attributes_trace_test.go index 18082cde4c2d..e994fe025168 100644 --- a/processor/attributesprocessor/attributes_trace_test.go +++ b/processor/attributesprocessor/attributes_trace_test.go @@ -23,8 +23,9 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/consumer/consumertest" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/attraction" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/processor/filterconfig" @@ -51,20 +52,20 @@ func runIndividualTestCase(t *testing.T, tt testCase, tp component.TracesProcess }) } -func generateTraceData(serviceName, spanName string, attrs map[string]interface{}) pdata.Traces { - td := pdata.NewTraces() +func generateTraceData(serviceName, spanName string, attrs map[string]interface{}) ptrace.Traces { + td := ptrace.NewTraces() rs := td.ResourceSpans().AppendEmpty() if serviceName != "" { rs.Resource().Attributes().UpsertString(conventions.AttributeServiceName, serviceName) } span := rs.ScopeSpans().AppendEmpty().Spans().AppendEmpty() span.SetName(spanName) - pdata.NewMapFromRaw(attrs).CopyTo(span.Attributes()) + pcommon.NewMapFromRaw(attrs).CopyTo(span.Attributes()) span.Attributes().Sort() return td } -func sortAttributes(td pdata.Traces) { +func sortAttributes(td ptrace.Traces) { rss := td.ResourceSpans() for i := 0; i < rss.Len(); i++ { rs := rss.At(i) @@ -83,15 +84,15 @@ func sortAttributes(td pdata.Traces) { func TestSpanProcessor_NilEmptyData(t *testing.T) { type nilEmptyTestCase struct { name string - input pdata.Traces - output pdata.Traces + input ptrace.Traces + output ptrace.Traces } // TODO: Add test for "nil" Span/Attributes. This needs support from data slices to allow to construct that. 
testCases := []nilEmptyTestCase{ { name: "empty", - input: pdata.NewTraces(), - output: pdata.NewTraces(), + input: ptrace.NewTraces(), + output: ptrace.NewTraces(), }, { name: "one-empty-resource-spans", diff --git a/processor/attributesprocessor/go.mod b/processor/attributesprocessor/go.mod index 8ff895fac3b5..e8d99e774ddc 100644 --- a/processor/attributesprocessor/go.mod +++ b/processor/attributesprocessor/go.mod @@ -5,8 +5,9 @@ go 1.17 require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.48.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/zap v1.21.0 ) @@ -15,7 +16,7 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/text v0.2.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect @@ -34,3 +35,5 @@ require ( ) replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal => ../../internal/coreinternal + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/processor/attributesprocessor/go.sum b/processor/attributesprocessor/go.sum index d52328446b66..750ad79e4cac 100644 --- a/processor/attributesprocessor/go.sum +++ b/processor/attributesprocessor/go.sum @@ -18,7 +18,7 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= @@ -76,7 +76,6 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp 
v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -106,8 +105,8 @@ github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqx github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -176,10 +175,12 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= @@ -219,7 +220,7 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= golang.org/x/oauth2 
v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -243,7 +244,7 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= diff --git a/processor/cumulativetodeltaprocessor/go.mod b/processor/cumulativetodeltaprocessor/go.mod index 303d1c2dde39..9d9dba4f8d9b 100644 --- a/processor/cumulativetodeltaprocessor/go.mod +++ b/processor/cumulativetodeltaprocessor/go.mod @@ -4,8 +4,8 @@ go 1.17 require ( github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/zap v1.21.0 ) @@ -14,7 +14,7 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/fsnotify/fsnotify v1.5.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/pretty v0.3.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect @@ -22,7 +22,6 @@ require ( github.com/pelletier/go-toml v1.9.4 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect @@ -33,3 +32,5 @@ require ( gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/processor/cumulativetodeltaprocessor/go.sum b/processor/cumulativetodeltaprocessor/go.sum index e899f5a73fe9..2b7090fb26d6 100644 --- a/processor/cumulativetodeltaprocessor/go.sum +++ b/processor/cumulativetodeltaprocessor/go.sum @@ -15,7 +15,7 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= 
-github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= @@ -70,7 +70,6 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -100,8 +99,8 @@ github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqx github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= @@ -148,8 +147,6 @@ github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTE github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -164,10 +161,10 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= 
-go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= @@ -207,7 +204,7 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -230,7 +227,7 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= diff --git a/processor/cumulativetodeltaprocessor/internal/tracking/identity.go b/processor/cumulativetodeltaprocessor/internal/tracking/identity.go index 17f68bed62f0..662476ae4535 100644 --- a/processor/cumulativetodeltaprocessor/internal/tracking/identity.go +++ b/processor/cumulativetodeltaprocessor/internal/tracking/identity.go @@ -18,19 +18,20 @@ import ( "bytes" "strconv" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + 
"go.opentelemetry.io/collector/pdata/pmetric" ) type MetricIdentity struct { - Resource pdata.Resource - InstrumentationLibrary pdata.InstrumentationScope - MetricDataType pdata.MetricDataType + Resource pcommon.Resource + InstrumentationLibrary pcommon.InstrumentationScope + MetricDataType pmetric.MetricDataType MetricIsMonotonic bool MetricName string MetricUnit string - StartTimestamp pdata.Timestamp - Attributes pdata.Map - MetricValueType pdata.MetricValueType + StartTimestamp pcommon.Timestamp + Attributes pcommon.Map + MetricValueType pmetric.MetricValueType } const A = int32('A') @@ -41,7 +42,7 @@ func (mi *MetricIdentity) Write(b *bytes.Buffer) { b.WriteRune(A + int32(mi.MetricDataType)) b.WriteByte(SEP) b.WriteRune(A + int32(mi.MetricValueType)) - mi.Resource.Attributes().Sort().Range(func(k string, v pdata.Value) bool { + mi.Resource.Attributes().Sort().Range(func(k string, v pcommon.Value) bool { b.WriteByte(SEP) b.WriteString(k) b.WriteByte(':') @@ -65,7 +66,7 @@ func (mi *MetricIdentity) Write(b *bytes.Buffer) { b.WriteByte(SEP) b.WriteString(mi.MetricUnit) - mi.Attributes.Sort().Range(func(k string, v pdata.Value) bool { + mi.Attributes.Sort().Range(func(k string, v pcommon.Value) bool { b.WriteByte(SEP) b.WriteString(k) b.WriteByte(':') @@ -77,9 +78,9 @@ func (mi *MetricIdentity) Write(b *bytes.Buffer) { } func (mi *MetricIdentity) IsFloatVal() bool { - return mi.MetricValueType == pdata.MetricValueTypeDouble + return mi.MetricValueType == pmetric.MetricValueTypeDouble } func (mi *MetricIdentity) IsSupportedMetricType() bool { - return mi.MetricDataType == pdata.MetricDataTypeSum + return mi.MetricDataType == pmetric.MetricDataTypeSum } diff --git a/processor/cumulativetodeltaprocessor/internal/tracking/identity_test.go b/processor/cumulativetodeltaprocessor/internal/tracking/identity_test.go index 3a2fb2f95902..b53b0fe15bb5 100644 --- a/processor/cumulativetodeltaprocessor/internal/tracking/identity_test.go +++ b/processor/cumulativetodeltaprocessor/internal/tracking/identity_test.go @@ -19,29 +19,30 @@ import ( "strings" "testing" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" ) func TestMetricIdentity_Write(t *testing.T) { - resource := pdata.NewResource() + resource := pcommon.NewResource() resource.Attributes().InsertBool("resource", true) - il := pdata.NewInstrumentationScope() + il := pcommon.NewInstrumentationScope() il.SetName("ilm_name") il.SetVersion("ilm_version") - attributes := pdata.NewMap() + attributes := pcommon.NewMap() attributes.InsertString("label", "value") type fields struct { - Resource pdata.Resource - InstrumentationLibrary pdata.InstrumentationScope - MetricDataType pdata.MetricDataType + Resource pcommon.Resource + InstrumentationLibrary pcommon.InstrumentationScope + MetricDataType pmetric.MetricDataType MetricIsMonotonic bool MetricName string MetricUnit string - StartTimestamp pdata.Timestamp - Attributes pdata.Map - MetricValueType pdata.MetricValueType + StartTimestamp pcommon.Timestamp + Attributes pcommon.Map + MetricValueType pmetric.MetricValueType } tests := []struct { name string @@ -65,8 +66,8 @@ func TestMetricIdentity_Write(t *testing.T) { Resource: resource, InstrumentationLibrary: il, Attributes: attributes, - MetricDataType: pdata.MetricDataTypeSum, - MetricValueType: pdata.MetricValueTypeInt, + MetricDataType: pmetric.MetricDataTypeSum, + MetricValueType: pmetric.MetricValueTypeInt, MetricIsMonotonic: true, }, want: []string{"C" + 
SEPSTR + "B", "Y"}, @@ -99,7 +100,7 @@ func TestMetricIdentity_Write(t *testing.T) { func TestMetricIdentity_IsFloatVal(t *testing.T) { type fields struct { - MetricValueType pdata.MetricValueType + MetricValueType pmetric.MetricValueType } tests := []struct { name string @@ -109,14 +110,14 @@ func TestMetricIdentity_IsFloatVal(t *testing.T) { { name: "float", fields: fields{ - MetricValueType: pdata.MetricValueTypeDouble, + MetricValueType: pmetric.MetricValueTypeDouble, }, want: true, }, { name: "int", fields: fields{ - MetricValueType: pdata.MetricValueTypeInt, + MetricValueType: pmetric.MetricValueTypeInt, }, want: false, }, @@ -124,10 +125,10 @@ func TestMetricIdentity_IsFloatVal(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { mi := &MetricIdentity{ - Resource: pdata.NewResource(), - InstrumentationLibrary: pdata.NewInstrumentationScope(), - Attributes: pdata.NewMap(), - MetricDataType: pdata.MetricDataTypeSum, + Resource: pcommon.NewResource(), + InstrumentationLibrary: pcommon.NewInstrumentationScope(), + Attributes: pcommon.NewMap(), + MetricDataType: pmetric.MetricDataTypeSum, MetricValueType: tt.fields.MetricValueType, } if got := mi.IsFloatVal(); got != tt.want { @@ -139,7 +140,7 @@ func TestMetricIdentity_IsFloatVal(t *testing.T) { func TestMetricIdentity_IsSupportedMetricType(t *testing.T) { type fields struct { - MetricDataType pdata.MetricDataType + MetricDataType pmetric.MetricDataType } tests := []struct { name string @@ -149,14 +150,14 @@ func TestMetricIdentity_IsSupportedMetricType(t *testing.T) { { name: "sum", fields: fields{ - MetricDataType: pdata.MetricDataTypeSum, + MetricDataType: pmetric.MetricDataTypeSum, }, want: true, }, { name: "histogram", fields: fields{ - MetricDataType: pdata.MetricDataTypeHistogram, + MetricDataType: pmetric.MetricDataTypeHistogram, }, want: false, }, @@ -164,9 +165,9 @@ func TestMetricIdentity_IsSupportedMetricType(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { mi := &MetricIdentity{ - Resource: pdata.NewResource(), - InstrumentationLibrary: pdata.NewInstrumentationScope(), - Attributes: pdata.NewMap(), + Resource: pcommon.NewResource(), + InstrumentationLibrary: pcommon.NewInstrumentationScope(), + Attributes: pcommon.NewMap(), MetricDataType: tt.fields.MetricDataType, } if got := mi.IsSupportedMetricType(); got != tt.want { diff --git a/processor/cumulativetodeltaprocessor/internal/tracking/tracker.go b/processor/cumulativetodeltaprocessor/internal/tracking/tracker.go index 74a0c83b160a..36b81c595485 100644 --- a/processor/cumulativetodeltaprocessor/internal/tracking/tracker.go +++ b/processor/cumulativetodeltaprocessor/internal/tracking/tracker.go @@ -21,7 +21,7 @@ import ( "sync" "time" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" "go.uber.org/zap" ) @@ -41,7 +41,7 @@ type State struct { } type DeltaValue struct { - StartTimestamp pdata.Timestamp + StartTimestamp pcommon.Timestamp FloatValue float64 IntValue int64 } @@ -136,7 +136,7 @@ func (t *MetricTracker) Convert(in MetricPoint) (out DeltaValue, valid bool) { return } -func (t *MetricTracker) removeStale(staleBefore pdata.Timestamp) { +func (t *MetricTracker) removeStale(staleBefore pcommon.Timestamp) { t.states.Range(func(key, value interface{}) bool { s := value.(*State) @@ -164,12 +164,12 @@ func (t *MetricTracker) removeStale(staleBefore pdata.Timestamp) { }) } -func (t *MetricTracker) sweeper(ctx context.Context, remove func(pdata.Timestamp)) { +func (t 
*MetricTracker) sweeper(ctx context.Context, remove func(pcommon.Timestamp)) { ticker := time.NewTicker(t.maxStaleness) for { select { case currentTime := <-ticker.C: - staleBefore := pdata.NewTimestampFromTime(currentTime.Add(-t.maxStaleness)) + staleBefore := pcommon.NewTimestampFromTime(currentTime.Add(-t.maxStaleness)) remove(staleBefore) case <-ctx.Done(): ticker.Stop() diff --git a/processor/cumulativetodeltaprocessor/internal/tracking/tracker_test.go b/processor/cumulativetodeltaprocessor/internal/tracking/tracker_test.go index 00c095518076..2365da2ec5cb 100644 --- a/processor/cumulativetodeltaprocessor/internal/tracking/tracker_test.go +++ b/processor/cumulativetodeltaprocessor/internal/tracking/tracker_test.go @@ -21,23 +21,24 @@ import ( "testing" "time" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" ) func TestMetricTracker_Convert(t *testing.T) { miSum := MetricIdentity{ - Resource: pdata.NewResource(), - InstrumentationLibrary: pdata.NewInstrumentationScope(), - MetricDataType: pdata.MetricDataTypeSum, + Resource: pcommon.NewResource(), + InstrumentationLibrary: pcommon.NewInstrumentationScope(), + MetricDataType: pmetric.MetricDataTypeSum, MetricIsMonotonic: true, MetricName: "", MetricUnit: "", - Attributes: pdata.NewMap(), + Attributes: pcommon.NewMap(), } miIntSum := miSum - miIntSum.MetricValueType = pdata.MetricValueTypeInt - miSum.MetricValueType = pdata.MetricValueTypeDouble + miIntSum.MetricValueType = pmetric.MetricValueTypeInt + miSum.MetricValueType = pmetric.MetricValueTypeDouble m := NewMetricTracker(context.Background(), zap.NewNop(), 0) @@ -135,7 +136,7 @@ func TestMetricTracker_Convert(t *testing.T) { t.Run("Invalid metric identity", func(t *testing.T) { invalidID := miIntSum - invalidID.MetricDataType = pdata.MetricDataTypeGauge + invalidID.MetricDataType = pmetric.MetricDataTypeGauge _, valid := m.Convert(MetricPoint{ Identity: invalidID, Value: ValuePoint{ @@ -151,7 +152,7 @@ func TestMetricTracker_Convert(t *testing.T) { } func Test_metricTracker_removeStale(t *testing.T) { - currentTime := pdata.Timestamp(100) + currentTime := pcommon.Timestamp(100) freshPoint := ValuePoint{ ObservedTimestamp: currentTime, } @@ -214,10 +215,10 @@ func Test_metricTracker_removeStale(t *testing.T) { func Test_metricTracker_sweeper(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) - sweepEvent := make(chan pdata.Timestamp) + sweepEvent := make(chan pcommon.Timestamp) closed := int32(0) - onSweep := func(staleBefore pdata.Timestamp) { + onSweep := func(staleBefore pcommon.Timestamp) { sweepEvent <- staleBefore } diff --git a/processor/cumulativetodeltaprocessor/internal/tracking/value.go b/processor/cumulativetodeltaprocessor/internal/tracking/value.go index 4185b50d989c..94f55907c243 100644 --- a/processor/cumulativetodeltaprocessor/internal/tracking/value.go +++ b/processor/cumulativetodeltaprocessor/internal/tracking/value.go @@ -14,12 +14,10 @@ package tracking // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor/internal/tracking" -import ( - "go.opentelemetry.io/collector/model/pdata" -) +import "go.opentelemetry.io/collector/pdata/pcommon" type ValuePoint struct { - ObservedTimestamp pdata.Timestamp + ObservedTimestamp pcommon.Timestamp FloatValue float64 IntValue int64 } diff --git a/processor/cumulativetodeltaprocessor/processor.go 
b/processor/cumulativetodeltaprocessor/processor.go index cac67762f313..7bdda7379f94 100644 --- a/processor/cumulativetodeltaprocessor/processor.go +++ b/processor/cumulativetodeltaprocessor/processor.go @@ -18,7 +18,7 @@ import ( "context" "math" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor/internal/tracking" @@ -48,20 +48,20 @@ func newCumulativeToDeltaProcessor(config *Config, logger *zap.Logger) *cumulati } // processMetrics implements the ProcessMetricsFunc type. -func (ctdp *cumulativeToDeltaProcessor) processMetrics(_ context.Context, md pdata.Metrics) (pdata.Metrics, error) { +func (ctdp *cumulativeToDeltaProcessor) processMetrics(_ context.Context, md pmetric.Metrics) (pmetric.Metrics, error) { resourceMetricsSlice := md.ResourceMetrics() - resourceMetricsSlice.RemoveIf(func(rm pdata.ResourceMetrics) bool { + resourceMetricsSlice.RemoveIf(func(rm pmetric.ResourceMetrics) bool { ilms := rm.ScopeMetrics() - ilms.RemoveIf(func(ilm pdata.ScopeMetrics) bool { + ilms.RemoveIf(func(ilm pmetric.ScopeMetrics) bool { ms := ilm.Metrics() - ms.RemoveIf(func(m pdata.Metric) bool { + ms.RemoveIf(func(m pmetric.Metric) bool { if _, ok := ctdp.metrics[m.Name()]; !ok { return false } switch m.DataType() { - case pdata.MetricDataTypeSum: + case pmetric.MetricDataTypeSum: ms := m.Sum() - if ms.AggregationTemporality() != pdata.MetricAggregationTemporalityCumulative { + if ms.AggregationTemporality() != pmetric.MetricAggregationTemporalityCumulative { return false } @@ -79,7 +79,7 @@ func (ctdp *cumulativeToDeltaProcessor) processMetrics(_ context.Context, md pda MetricIsMonotonic: ms.IsMonotonic(), } ctdp.convertDataPoints(ms.DataPoints(), baseIdentity) - ms.SetAggregationTemporality(pdata.MetricAggregationTemporalityDelta) + ms.SetAggregationTemporality(pmetric.MetricAggregationTemporalityDelta) return ms.DataPoints().Len() == 0 default: return false @@ -99,8 +99,8 @@ func (ctdp *cumulativeToDeltaProcessor) shutdown(context.Context) error { func (ctdp *cumulativeToDeltaProcessor) convertDataPoints(in interface{}, baseIdentity tracking.MetricIdentity) { switch dps := in.(type) { - case pdata.NumberDataPointSlice: - dps.RemoveIf(func(dp pdata.NumberDataPoint) bool { + case pmetric.NumberDataPointSlice: + dps.RemoveIf(func(dp pmetric.NumberDataPoint) bool { id := baseIdentity id.StartTimestamp = dp.StartTimestamp() id.Attributes = dp.Attributes() diff --git a/processor/cumulativetodeltaprocessor/processor_test.go b/processor/cumulativetodeltaprocessor/processor_test.go index de9104630696..f254082ac51c 100644 --- a/processor/cumulativetodeltaprocessor/processor_test.go +++ b/processor/cumulativetodeltaprocessor/processor_test.go @@ -26,7 +26,8 @@ import ( "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/consumer/consumertest" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" ) @@ -39,8 +40,8 @@ type testMetric struct { type cumulativeToDeltaTest struct { name string metrics []string - inMetrics pdata.Metrics - outMetrics pdata.Metrics + inMetrics pmetric.Metrics + outMetrics pmetric.Metrics } var ( @@ -132,7 +133,7 @@ func TestCumulativeToDeltaProcessor(t *testing.T) { require.Equal(t, eM.Name(), aM.Name()) - if eM.DataType() == 
pdata.MetricDataTypeGauge { + if eM.DataType() == pmetric.MetricDataTypeGauge { eDataPoints := eM.Gauge().DataPoints() aDataPoints := aM.Gauge().DataPoints() require.Equal(t, eDataPoints.Len(), aDataPoints.Len()) @@ -142,7 +143,7 @@ func TestCumulativeToDeltaProcessor(t *testing.T) { } } - if eM.DataType() == pdata.MetricDataTypeSum { + if eM.DataType() == pmetric.MetricDataTypeSum { eDataPoints := eM.Sum().DataPoints() aDataPoints := aM.Sum().DataPoints() @@ -165,8 +166,8 @@ func TestCumulativeToDeltaProcessor(t *testing.T) { } } -func generateTestMetrics(tm testMetric) pdata.Metrics { - md := pdata.NewMetrics() +func generateTestMetrics(tm testMetric) pmetric.Metrics { + md := pmetric.NewMetrics() now := time.Now() rm := md.ResourceMetrics().AppendEmpty() @@ -174,20 +175,20 @@ func generateTestMetrics(tm testMetric) pdata.Metrics { for i, name := range tm.metricNames { m := ms.AppendEmpty() m.SetName(name) - m.SetDataType(pdata.MetricDataTypeSum) + m.SetDataType(pmetric.MetricDataTypeSum) sum := m.Sum() sum.SetIsMonotonic(true) if tm.isCumulative[i] { - sum.SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + sum.SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) } else { - sum.SetAggregationTemporality(pdata.MetricAggregationTemporalityDelta) + sum.SetAggregationTemporality(pmetric.MetricAggregationTemporalityDelta) } for _, value := range tm.metricValues[i] { dp := m.Sum().DataPoints().AppendEmpty() - dp.SetTimestamp(pdata.NewTimestampFromTime(now.Add(10 * time.Second))) + dp.SetTimestamp(pcommon.NewTimestampFromTime(now.Add(10 * time.Second))) dp.SetDoubleVal(value) } } @@ -210,21 +211,21 @@ func BenchmarkConsumeMetrics(b *testing.B) { b.Fatal(err) } - metrics := pdata.NewMetrics() + metrics := pmetric.NewMetrics() rms := metrics.ResourceMetrics().AppendEmpty() r := rms.Resource() - r.Attributes().Insert("resource", pdata.NewValueBool(true)) + r.Attributes().Insert("resource", pcommon.NewValueBool(true)) ilms := rms.ScopeMetrics().AppendEmpty() ilms.Scope().SetName("test") ilms.Scope().SetVersion("0.1") m := ilms.Metrics().AppendEmpty() - m.SetDataType(pdata.MetricDataTypeSum) + m.SetDataType(pmetric.MetricDataTypeSum) m.Sum().SetIsMonotonic(true) dp := m.Sum().DataPoints().AppendEmpty() - dp.Attributes().Insert("tag", pdata.NewValueString("value")) + dp.Attributes().Insert("tag", pcommon.NewValueString("value")) reset := func() { - m.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) dp.SetDoubleVal(100.0) } diff --git a/processor/deltatorateprocessor/go.mod b/processor/deltatorateprocessor/go.mod index 7d1dbc23f637..25b4879caa59 100644 --- a/processor/deltatorateprocessor/go.mod +++ b/processor/deltatorateprocessor/go.mod @@ -4,8 +4,8 @@ go 1.17 require ( github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/zap v1.21.0 ) @@ -14,7 +14,7 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/fsnotify/fsnotify v1.5.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/pretty v0.3.0 // indirect github.com/mitchellh/copystructure 
v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect @@ -22,7 +22,6 @@ require ( github.com/pelletier/go-toml v1.9.4 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect @@ -33,3 +32,5 @@ require ( gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/processor/deltatorateprocessor/go.sum b/processor/deltatorateprocessor/go.sum index e899f5a73fe9..2b7090fb26d6 100644 --- a/processor/deltatorateprocessor/go.sum +++ b/processor/deltatorateprocessor/go.sum @@ -15,7 +15,7 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= @@ -70,7 +70,6 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -100,8 +99,8 @@ github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqx github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod 
h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= @@ -148,8 +147,6 @@ github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTE github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -164,10 +161,10 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= @@ -207,7 +204,7 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -230,7 +227,7 @@ golang.org/x/sys 
v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= diff --git a/processor/deltatorateprocessor/processor.go b/processor/deltatorateprocessor/processor.go index 583a9cfe4bcd..4a6027d96dbb 100644 --- a/processor/deltatorateprocessor/processor.go +++ b/processor/deltatorateprocessor/processor.go @@ -21,7 +21,7 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer/consumererror" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" ) @@ -48,7 +48,7 @@ func (dtrp *deltaToRateProcessor) Start(context.Context, component.Host) error { } // processMetrics implements the ProcessMetricsFunc type. -func (dtrp *deltaToRateProcessor) processMetrics(_ context.Context, md pdata.Metrics) (pdata.Metrics, error) { +func (dtrp *deltaToRateProcessor) processMetrics(_ context.Context, md pmetric.Metrics) (pmetric.Metrics, error) { resourceMetricsSlice := md.ResourceMetrics() for i := 0; i < resourceMetricsSlice.Len(); i++ { @@ -62,11 +62,11 @@ func (dtrp *deltaToRateProcessor) processMetrics(_ context.Context, md pdata.Met if _, ok := dtrp.ConfiguredMetrics[metric.Name()]; !ok { continue } - if metric.DataType() != pdata.MetricDataTypeSum || metric.Sum().AggregationTemporality() != pdata.MetricAggregationTemporalityDelta { + if metric.DataType() != pmetric.MetricDataTypeSum || metric.Sum().AggregationTemporality() != pmetric.MetricAggregationTemporalityDelta { dtrp.logger.Info(fmt.Sprintf("Configured metric for rate calculation %s is not a delta sum\n", metric.Name())) continue } - newDoubleDataPointSlice := pdata.NewNumberDataPointSlice() + newDoubleDataPointSlice := pmetric.NewNumberDataPointSlice() dataPoints := metric.Sum().DataPoints() for i := 0; i < dataPoints.Len(); i++ { @@ -77,9 +77,9 @@ func (dtrp *deltaToRateProcessor) processMetrics(_ context.Context, md pdata.Met durationNanos := time.Duration(fromDataPoint.Timestamp() - fromDataPoint.StartTimestamp()) var rate float64 switch fromDataPoint.ValueType() { - case pdata.MetricValueTypeDouble: + case pmetric.MetricValueTypeDouble: rate = calculateRate(fromDataPoint.DoubleVal(), durationNanos) - case pdata.MetricValueTypeInt: + case pmetric.MetricValueTypeInt: rate = calculateRate(float64(fromDataPoint.IntVal()), durationNanos) default: return md, consumererror.NewPermanent(fmt.Errorf("invalid data point type:%d", fromDataPoint.ValueType())) @@ -87,7 +87,7 @@ func (dtrp *deltaToRateProcessor) processMetrics(_ context.Context, md pdata.Met newDp.SetDoubleVal(rate) } - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) for d := 0; d < newDoubleDataPointSlice.Len(); d++ { dp := 
metric.Gauge().DataPoints().AppendEmpty() newDoubleDataPointSlice.At(d).CopyTo(dp) diff --git a/processor/deltatorateprocessor/processor_test.go b/processor/deltatorateprocessor/processor_test.go index 43b14b71cf92..0f5c920cbf22 100644 --- a/processor/deltatorateprocessor/processor_test.go +++ b/processor/deltatorateprocessor/processor_test.go @@ -24,7 +24,8 @@ import ( "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/consumer/consumertest" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" ) type testMetric struct { @@ -38,8 +39,8 @@ type testMetric struct { type deltaToRateTest struct { name string metrics []string - inMetrics pdata.Metrics - outMetrics pdata.Metrics + inMetrics pmetric.Metrics + outMetrics pmetric.Metrics } var ( @@ -163,7 +164,7 @@ func TestCumulativeToDeltaProcessor(t *testing.T) { require.Equal(t, eM.Name(), aM.Name()) - if eM.DataType() == pdata.MetricDataTypeGauge { + if eM.DataType() == pmetric.MetricDataTypeGauge { eDataPoints := eM.Gauge().DataPoints() aDataPoints := aM.Gauge().DataPoints() require.Equal(t, eDataPoints.Len(), aDataPoints.Len()) @@ -173,7 +174,7 @@ func TestCumulativeToDeltaProcessor(t *testing.T) { } } - if eM.DataType() == pdata.MetricDataTypeSum { + if eM.DataType() == pmetric.MetricDataTypeSum { eDataPoints := eM.Sum().DataPoints() aDataPoints := aM.Sum().DataPoints() @@ -192,8 +193,8 @@ func TestCumulativeToDeltaProcessor(t *testing.T) { } } -func generateSumMetrics(tm testMetric) pdata.Metrics { - md := pdata.NewMetrics() +func generateSumMetrics(tm testMetric) pmetric.Metrics { + md := pmetric.NewMetrics() now := time.Now() delta := time.Duration(tm.deltaSecond) @@ -202,30 +203,30 @@ func generateSumMetrics(tm testMetric) pdata.Metrics { for i, name := range tm.metricNames { m := ms.AppendEmpty() m.SetName(name) - m.SetDataType(pdata.MetricDataTypeSum) + m.SetDataType(pmetric.MetricDataTypeSum) sum := m.Sum() sum.SetIsMonotonic(true) if tm.isDelta[i] { - sum.SetAggregationTemporality(pdata.MetricAggregationTemporalityDelta) + sum.SetAggregationTemporality(pmetric.MetricAggregationTemporalityDelta) } else { - sum.SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + sum.SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) } if i < len(tm.metricValues) { for _, value := range tm.metricValues[i] { dp := m.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(pdata.NewTimestampFromTime(now)) - dp.SetTimestamp(pdata.NewTimestampFromTime(now.Add(delta * time.Second))) + dp.SetStartTimestamp(pcommon.NewTimestampFromTime(now)) + dp.SetTimestamp(pcommon.NewTimestampFromTime(now.Add(delta * time.Second))) dp.SetDoubleVal(value) } } if i < len(tm.metricIntValues) { for _, value := range tm.metricIntValues[i] { dp := m.Sum().DataPoints().AppendEmpty() - dp.SetStartTimestamp(pdata.NewTimestampFromTime(now)) - dp.SetTimestamp(pdata.NewTimestampFromTime(now.Add(delta * time.Second))) + dp.SetStartTimestamp(pcommon.NewTimestampFromTime(now)) + dp.SetTimestamp(pcommon.NewTimestampFromTime(now.Add(delta * time.Second))) dp.SetIntVal(value) } } @@ -234,8 +235,8 @@ func generateSumMetrics(tm testMetric) pdata.Metrics { return md } -func generateGaugeMetrics(tm testMetric) pdata.Metrics { - md := pdata.NewMetrics() +func generateGaugeMetrics(tm testMetric) pmetric.Metrics { + md := pmetric.NewMetrics() now := time.Now() rm := 
md.ResourceMetrics().AppendEmpty() @@ -243,18 +244,18 @@ func generateGaugeMetrics(tm testMetric) pdata.Metrics { for i, name := range tm.metricNames { m := ms.AppendEmpty() m.SetName(name) - m.SetDataType(pdata.MetricDataTypeGauge) + m.SetDataType(pmetric.MetricDataTypeGauge) if i < len(tm.metricValues) { for _, value := range tm.metricValues[i] { dp := m.Gauge().DataPoints().AppendEmpty() - dp.SetTimestamp(pdata.NewTimestampFromTime(now.Add(120 * time.Second))) + dp.SetTimestamp(pcommon.NewTimestampFromTime(now.Add(120 * time.Second))) dp.SetDoubleVal(value) } } if i < len(tm.metricIntValues) { for _, value := range tm.metricIntValues[i] { dp := m.Gauge().DataPoints().AppendEmpty() - dp.SetTimestamp(pdata.NewTimestampFromTime(now.Add(120 * time.Second))) + dp.SetTimestamp(pcommon.NewTimestampFromTime(now.Add(120 * time.Second))) dp.SetIntVal(value) } } diff --git a/processor/filterprocessor/config.go b/processor/filterprocessor/config.go index 799ae9f8c374..a58d8729e3d0 100644 --- a/processor/filterprocessor/config.go +++ b/processor/filterprocessor/config.go @@ -56,11 +56,11 @@ type LogFilters struct { Exclude *LogMatchProperties `mapstructure:"exclude"` } -// LogMatchType specifies the strategy for matching against `pdata.Log`s. +// LogMatchType specifies the strategy for matching against `plog.Log`s. type LogMatchType string // These are the MatchTypes that users can specify for filtering -// `pdata.Log`s. +// `plog.Log`s. const ( Strict = LogMatchType(filterset.Strict) Regexp = LogMatchType(filterset.Regexp) diff --git a/processor/filterprocessor/expr_test.go b/processor/filterprocessor/expr_test.go index d05cd02b50c2..7857cedffd00 100644 --- a/processor/filterprocessor/expr_test.go +++ b/processor/filterprocessor/expr_test.go @@ -24,7 +24,8 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/consumer/consumertest" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" "go.uber.org/zap/zapcore" "go.uber.org/zap/zaptest/observer" @@ -36,37 +37,37 @@ import ( const filteredMetric = "p0_metric_1" const filteredAttrKey = "pt-label-key-1" -var filteredAttrVal = pdata.NewValueString("pt-label-val-1") +var filteredAttrVal = pcommon.NewValueString("pt-label-val-1") func TestExprError(t *testing.T) { - testMatchError(t, pdata.MetricDataTypeGauge, pdata.MetricValueTypeInt) - testMatchError(t, pdata.MetricDataTypeGauge, pdata.MetricValueTypeDouble) - testMatchError(t, pdata.MetricDataTypeSum, pdata.MetricValueTypeInt) - testMatchError(t, pdata.MetricDataTypeSum, pdata.MetricValueTypeDouble) - testMatchError(t, pdata.MetricDataTypeHistogram, pdata.MetricValueTypeNone) + testMatchError(t, pmetric.MetricDataTypeGauge, pmetric.MetricValueTypeInt) + testMatchError(t, pmetric.MetricDataTypeGauge, pmetric.MetricValueTypeDouble) + testMatchError(t, pmetric.MetricDataTypeSum, pmetric.MetricValueTypeInt) + testMatchError(t, pmetric.MetricDataTypeSum, pmetric.MetricValueTypeDouble) + testMatchError(t, pmetric.MetricDataTypeHistogram, pmetric.MetricValueTypeNone) } -func testMatchError(t *testing.T, mdType pdata.MetricDataType, mvType pdata.MetricValueType) { +func testMatchError(t *testing.T, mdType pmetric.MetricDataType, mvType pmetric.MetricValueType) { // the "foo" expr expression will cause expr Run() to return an error proc, next, logs := testProcessor(t, nil, []string{"foo"}) pdm := testData("", 1, mdType, mvType) err := 
proc.ConsumeMetrics(context.Background(), pdm) assert.NoError(t, err) // assert that metrics not be filtered as a result - assert.Equal(t, []pdata.Metrics{pdm}, next.AllMetrics()) + assert.Equal(t, []pmetric.Metrics{pdm}, next.AllMetrics()) assert.Equal(t, 1, logs.Len()) assert.Equal(t, "shouldKeepMetric failed", logs.All()[0].Message) } func TestExprProcessor(t *testing.T) { - testFilter(t, pdata.MetricDataTypeGauge, pdata.MetricValueTypeInt) - testFilter(t, pdata.MetricDataTypeGauge, pdata.MetricValueTypeDouble) - testFilter(t, pdata.MetricDataTypeSum, pdata.MetricValueTypeInt) - testFilter(t, pdata.MetricDataTypeSum, pdata.MetricValueTypeDouble) - testFilter(t, pdata.MetricDataTypeHistogram, pdata.MetricValueTypeNone) + testFilter(t, pmetric.MetricDataTypeGauge, pmetric.MetricValueTypeInt) + testFilter(t, pmetric.MetricDataTypeGauge, pmetric.MetricValueTypeDouble) + testFilter(t, pmetric.MetricDataTypeSum, pmetric.MetricValueTypeInt) + testFilter(t, pmetric.MetricDataTypeSum, pmetric.MetricValueTypeDouble) + testFilter(t, pmetric.MetricDataTypeHistogram, pmetric.MetricValueTypeNone) } -func testFilter(t *testing.T, mdType pdata.MetricDataType, mvType pdata.MetricValueType) { +func testFilter(t *testing.T, mdType pmetric.MetricDataType, mvType pmetric.MetricValueType) { format := "MetricName == '%s' && Label('%s') == '%s'" q := fmt.Sprintf(format, filteredMetric, filteredAttrKey, filteredAttrVal.StringVal()) @@ -92,17 +93,17 @@ func testFilter(t *testing.T, mdType pdata.MetricDataType, mvType pdata.MetricVa if metric.Name() == filteredMetric { dt := metric.DataType() switch dt { - case pdata.MetricDataTypeGauge: + case pmetric.MetricDataTypeGauge: pts := metric.Gauge().DataPoints() for l := 0; l < pts.Len(); l++ { assertFiltered(t, pts.At(l).Attributes()) } - case pdata.MetricDataTypeSum: + case pmetric.MetricDataTypeSum: pts := metric.Sum().DataPoints() for l := 0; l < pts.Len(); l++ { assertFiltered(t, pts.At(l).Attributes()) } - case pdata.MetricDataTypeHistogram: + case pmetric.MetricDataTypeHistogram: pts := metric.Histogram().DataPoints() for l := 0; l < pts.Len(); l++ { assertFiltered(t, pts.At(l).Attributes()) @@ -116,8 +117,8 @@ func testFilter(t *testing.T, mdType pdata.MetricDataType, mvType pdata.MetricVa assert.Equal(t, expectedMetricCount, filteredMetricCount) } -func assertFiltered(t *testing.T, lm pdata.Map) { - lm.Range(func(k string, v pdata.Value) bool { +func assertFiltered(t *testing.T, lm pcommon.Map) { + lm.Range(func(k string, v pcommon.Value) bool { if k == filteredAttrKey && v.Equal(filteredAttrVal) { assert.Fail(t, "found metric that should have been filtered out") return false @@ -126,7 +127,7 @@ func assertFiltered(t *testing.T, lm pdata.Map) { }) } -func filterMetrics(t *testing.T, include []string, exclude []string, mds []pdata.Metrics) []pdata.Metrics { +func filterMetrics(t *testing.T, include []string, exclude []string, mds []pmetric.Metrics) []pmetric.Metrics { proc, next, _ := testProcessor(t, include, exclude) for _, md := range mds { err := proc.ConsumeMetrics(context.Background(), md) @@ -175,15 +176,15 @@ func exprConfig(factory component.ProcessorFactory, include []string, exclude [] return cfg } -func testDataSlice(size int, mdType pdata.MetricDataType, mvType pdata.MetricValueType) []pdata.Metrics { - var out []pdata.Metrics +func testDataSlice(size int, mdType pmetric.MetricDataType, mvType pmetric.MetricValueType) []pmetric.Metrics { + var out []pmetric.Metrics for i := 0; i < 16; i++ { out = append(out, testData(fmt.Sprintf("p%d_", i), size, 
mdType, mvType)) } return out } -func testData(prefix string, size int, mdType pdata.MetricDataType, mvType pdata.MetricValueType) pdata.Metrics { +func testData(prefix string, size int, mdType pmetric.MetricDataType, mvType pmetric.MetricValueType) pmetric.Metrics { c := goldendataset.MetricsCfg{ MetricDescriptorType: mdType, MetricValueType: mvType, diff --git a/processor/filterprocessor/filter_processor.go b/processor/filterprocessor/filter_processor.go index d47c35868832..c39f94bd8a1f 100644 --- a/processor/filterprocessor/filter_processor.go +++ b/processor/filterprocessor/filter_processor.go @@ -17,7 +17,8 @@ package filterprocessor // import "github.com/open-telemetry/opentelemetry-colle import ( "context" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/processor/processorhelper" "go.uber.org/zap" @@ -123,8 +124,8 @@ func createMatcher(mp *filtermetric.MatchProperties) (filtermetric.Matcher, filt } // processMetrics filters the given metrics based off the filterMetricProcessor's filters. -func (fmp *filterMetricProcessor) processMetrics(_ context.Context, pdm pdata.Metrics) (pdata.Metrics, error) { - pdm.ResourceMetrics().RemoveIf(func(rm pdata.ResourceMetrics) bool { +func (fmp *filterMetricProcessor) processMetrics(_ context.Context, pdm pmetric.Metrics) (pmetric.Metrics, error) { + pdm.ResourceMetrics().RemoveIf(func(rm pmetric.ResourceMetrics) bool { keepMetricsForResource := fmp.shouldKeepMetricsForResource(rm.Resource()) if !keepMetricsForResource { return true @@ -134,8 +135,8 @@ func (fmp *filterMetricProcessor) processMetrics(_ context.Context, pdm pdata.Me return false } - rm.ScopeMetrics().RemoveIf(func(ilm pdata.ScopeMetrics) bool { - ilm.Metrics().RemoveIf(func(m pdata.Metric) bool { + rm.ScopeMetrics().RemoveIf(func(ilm pmetric.ScopeMetrics) bool { + ilm.Metrics().RemoveIf(func(m pmetric.Metric) bool { keep, err := fmp.shouldKeepMetric(m) if err != nil { fmp.logger.Error("shouldKeepMetric failed", zap.Error(err)) @@ -155,7 +156,7 @@ func (fmp *filterMetricProcessor) processMetrics(_ context.Context, pdm pdata.Me return pdm, nil } -func (fmp *filterMetricProcessor) shouldKeepMetric(metric pdata.Metric) (bool, error) { +func (fmp *filterMetricProcessor) shouldKeepMetric(metric pmetric.Metric) (bool, error) { if fmp.include != nil { matches, err := fmp.include.MatchMetric(metric) if err != nil { @@ -180,7 +181,7 @@ func (fmp *filterMetricProcessor) shouldKeepMetric(metric pdata.Metric) (bool, e return true, nil } -func (fmp *filterMetricProcessor) shouldKeepMetricsForResource(resource pdata.Resource) bool { +func (fmp *filterMetricProcessor) shouldKeepMetricsForResource(resource pcommon.Resource) bool { resourceAttributes := resource.Attributes() if fmp.include != nil && fmp.includeAttribute != nil { diff --git a/processor/filterprocessor/filter_processor_logs.go b/processor/filterprocessor/filter_processor_logs.go index e3101e6bb811..ebfb5e63333f 100644 --- a/processor/filterprocessor/filter_processor_logs.go +++ b/processor/filterprocessor/filter_processor_logs.go @@ -17,7 +17,8 @@ package filterprocessor // import "github.com/open-telemetry/opentelemetry-colle import ( "context" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/processor/processorhelper" "go.uber.org/zap" @@ -115,11 +116,11 @@ func 
getFilterConfigForMatchLevel(lp *LogMatchProperties, m MatchLevelType) []fi } } -func (flp *filterLogProcessor) ProcessLogs(ctx context.Context, logs pdata.Logs) (pdata.Logs, error) { +func (flp *filterLogProcessor) ProcessLogs(ctx context.Context, logs plog.Logs) (plog.Logs, error) { rLogs := logs.ResourceLogs() // Filter logs by resource level attributes - rLogs.RemoveIf(func(rm pdata.ResourceLogs) bool { + rLogs.RemoveIf(func(rm plog.ResourceLogs) bool { return flp.shouldSkipLogsForResource(rm.Resource()) }) @@ -133,24 +134,24 @@ func (flp *filterLogProcessor) ProcessLogs(ctx context.Context, logs pdata.Logs) return logs, nil } -func (flp *filterLogProcessor) filterByRecordAttributes(rLogs pdata.ResourceLogsSlice) { +func (flp *filterLogProcessor) filterByRecordAttributes(rLogs plog.ResourceLogsSlice) { for i := 0; i < rLogs.Len(); i++ { ills := rLogs.At(i).ScopeLogs() for j := 0; j < ills.Len(); j++ { ls := ills.At(j).LogRecords() - ls.RemoveIf(func(lr pdata.LogRecord) bool { + ls.RemoveIf(func(lr plog.LogRecord) bool { return flp.shouldSkipLogsForRecord(lr) }) } - ills.RemoveIf(func(sl pdata.ScopeLogs) bool { + ills.RemoveIf(func(sl plog.ScopeLogs) bool { return sl.LogRecords().Len() == 0 }) } - rLogs.RemoveIf(func(rl pdata.ResourceLogs) bool { + rLogs.RemoveIf(func(rl plog.ResourceLogs) bool { return rl.ScopeLogs().Len() == 0 }) } @@ -160,7 +161,7 @@ func (flp *filterLogProcessor) filterByRecordAttributes(rLogs pdata.ResourceLogs // False is returned when a log record should not be skipped. // The logic determining if a log record should be skipped is set in the // record attribute configuration. -func (flp *filterLogProcessor) shouldSkipLogsForRecord(lr pdata.LogRecord) bool { +func (flp *filterLogProcessor) shouldSkipLogsForRecord(lr plog.LogRecord) bool { if flp.includeRecords != nil { matches := flp.includeRecords.Match(lr.Attributes()) if !matches { @@ -182,7 +183,7 @@ func (flp *filterLogProcessor) shouldSkipLogsForRecord(lr pdata.LogRecord) bool // True is returned when a log should be skipped. // False is returned when a log should not be skipped. // The logic determining if a log should be skipped is set in the resource attribute configuration. 
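For readers following the log-filtering hunks above: after this migration the filter callbacks operate on the split pdata packages, with plog owning the log tree (ResourceLogs, ScopeLogs, LogRecord) and pcommon owning resources and attribute maps. Below is a minimal, self-contained sketch of the same RemoveIf cascade, assuming the v0.48-era pdata API shown in the diff; the dropLogsWithAttr helper and the "drop.me" attribute key are illustrative, not part of the processor.

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/plog"
)

// dropLogsWithAttr removes every log record carrying the given attribute key,
// then prunes scopes and resources that end up empty -- the same RemoveIf
// cascade used by filterByRecordAttributes above.
func dropLogsWithAttr(ld plog.Logs, key string) plog.Logs {
	rls := ld.ResourceLogs()
	for i := 0; i < rls.Len(); i++ {
		slss := rls.At(i).ScopeLogs()
		for j := 0; j < slss.Len(); j++ {
			slss.At(j).LogRecords().RemoveIf(func(lr plog.LogRecord) bool {
				_, found := lr.Attributes().Get(key) // Attributes() is a pcommon.Map
				return found
			})
		}
		slss.RemoveIf(func(sl plog.ScopeLogs) bool {
			return sl.LogRecords().Len() == 0
		})
	}
	rls.RemoveIf(func(rl plog.ResourceLogs) bool {
		return rl.ScopeLogs().Len() == 0
	})
	return ld
}

func main() {
	ld := plog.NewLogs()
	lr := ld.ResourceLogs().AppendEmpty().ScopeLogs().AppendEmpty().LogRecords().AppendEmpty()
	lr.Attributes().InsertString("drop.me", "yes") // illustrative marker attribute
	fmt.Println(dropLogsWithAttr(ld, "drop.me").LogRecordCount()) // 0
}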
-func (flp *filterLogProcessor) shouldSkipLogsForResource(resource pdata.Resource) bool { +func (flp *filterLogProcessor) shouldSkipLogsForResource(resource pcommon.Resource) bool { resourceAttributes := resource.Attributes() if flp.includeResources != nil { diff --git a/processor/filterprocessor/filter_processor_logs_test.go b/processor/filterprocessor/filter_processor_logs_test.go index 4b55bbf63565..f213affbc3b4 100644 --- a/processor/filterprocessor/filter_processor_logs_test.go +++ b/processor/filterprocessor/filter_processor_logs_test.go @@ -23,7 +23,8 @@ import ( "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/consumer/consumertest" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/processor/filterconfig" ) @@ -32,7 +33,7 @@ type logNameTest struct { name string inc *LogMatchProperties exc *LogMatchProperties - inLogs pdata.Logs + inLogs plog.Logs outLN [][]string // output Log names per Resource } @@ -382,14 +383,14 @@ func TestFilterLogProcessor(t *testing.T) { } } -func testResourceLogs(lwrs []logWithResource) pdata.Logs { - ld := pdata.NewLogs() +func testResourceLogs(lwrs []logWithResource) plog.Logs { + ld := plog.NewLogs() for i, lwr := range lwrs { rl := ld.ResourceLogs().AppendEmpty() // Add resource level attribtues - pdata.NewMapFromRaw(lwr.resourceAttributes).CopyTo(rl.Resource().Attributes()) + pcommon.NewMapFromRaw(lwr.resourceAttributes).CopyTo(rl.Resource().Attributes()) ls := rl.ScopeLogs().AppendEmpty().LogRecords() for _, name := range lwr.logNames { l := ls.AppendEmpty() @@ -397,7 +398,7 @@ func testResourceLogs(lwrs []logWithResource) pdata.Logs { // Add record level attribtues for k := 0; k < ls.Len(); k++ { - pdata.NewMapFromRaw(lwrs[i].recordAttributes).CopyTo(ls.At(k).Attributes()) + pcommon.NewMapFromRaw(lwrs[i].recordAttributes).CopyTo(ls.At(k).Attributes()) } } } @@ -405,14 +406,14 @@ func testResourceLogs(lwrs []logWithResource) pdata.Logs { } func TestNilResourceLogs(t *testing.T) { - logs := pdata.NewLogs() + logs := plog.NewLogs() rls := logs.ResourceLogs() rls.AppendEmpty() requireNotPanicsLogs(t, logs) } func TestNilILL(t *testing.T) { - logs := pdata.NewLogs() + logs := plog.NewLogs() rls := logs.ResourceLogs() rl := rls.AppendEmpty() ills := rl.ScopeLogs() @@ -421,7 +422,7 @@ func TestNilILL(t *testing.T) { } func TestNilLog(t *testing.T) { - logs := pdata.NewLogs() + logs := plog.NewLogs() rls := logs.ResourceLogs() rl := rls.AppendEmpty() ills := rl.ScopeLogs() @@ -431,7 +432,7 @@ func TestNilLog(t *testing.T) { requireNotPanicsLogs(t, logs) } -func requireNotPanicsLogs(t *testing.T, logs pdata.Logs) { +func requireNotPanicsLogs(t *testing.T, logs plog.Logs) { factory := NewFactory() cfg := factory.CreateDefaultConfig() pcfg := cfg.(*Config) diff --git a/processor/filterprocessor/filter_processor_test.go b/processor/filterprocessor/filter_processor_test.go index ebfd7e8fe92d..e72b0126cf39 100644 --- a/processor/filterprocessor/filter_processor_test.go +++ b/processor/filterprocessor/filter_processor_test.go @@ -25,7 +25,8 @@ import ( "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/consumer/consumertest" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + 
"go.opentelemetry.io/collector/pdata/pmetric" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/goldendataset" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/processor/filterconfig" @@ -36,7 +37,7 @@ type metricNameTest struct { name string inc *filtermetric.MatchProperties exc *filtermetric.MatchProperties - inMetrics pdata.Metrics + inMetrics pmetric.Metrics outMN [][]string // output Metric names per Resource allMetricsFiltered bool } @@ -378,20 +379,20 @@ func TestFilterMetricProcessor(t *testing.T) { } } -func testResourceMetrics(mwrs []metricWithResource) pdata.Metrics { - md := pdata.NewMetrics() +func testResourceMetrics(mwrs []metricWithResource) pmetric.Metrics { + md := pmetric.NewMetrics() now := time.Now() for _, mwr := range mwrs { rm := md.ResourceMetrics().AppendEmpty() - pdata.NewMapFromRaw(mwr.resourceAttributes).CopyTo(rm.Resource().Attributes()) + pcommon.NewMapFromRaw(mwr.resourceAttributes).CopyTo(rm.Resource().Attributes()) ms := rm.ScopeMetrics().AppendEmpty().Metrics() for _, name := range mwr.metricNames { m := ms.AppendEmpty() m.SetName(name) - m.SetDataType(pdata.MetricDataTypeGauge) + m.SetDataType(pmetric.MetricDataTypeGauge) dp := m.Gauge().DataPoints().AppendEmpty() - dp.SetTimestamp(pdata.NewTimestampFromTime(now.Add(10 * time.Second))) + dp.SetTimestamp(pcommon.NewTimestampFromTime(now.Add(10 * time.Second))) dp.SetDoubleVal(123) } } @@ -446,8 +447,8 @@ func benchmarkFilter(b *testing.B, mp *filtermetric.MatchProperties) { } } -func metricSlice(numMetrics int) []pdata.Metrics { - var out []pdata.Metrics +func metricSlice(numMetrics int) []pmetric.Metrics { + var out []pmetric.Metrics for i := 0; i < numMetrics; i++ { const size = 2 out = append(out, pdm(fmt.Sprintf("p%d_", i), size)) @@ -455,10 +456,10 @@ func metricSlice(numMetrics int) []pdata.Metrics { return out } -func pdm(prefix string, size int) pdata.Metrics { +func pdm(prefix string, size int) pmetric.Metrics { c := goldendataset.MetricsCfg{ - MetricDescriptorType: pdata.MetricDataTypeGauge, - MetricValueType: pdata.MetricValueTypeInt, + MetricDescriptorType: pmetric.MetricDataTypeGauge, + MetricValueType: pmetric.MetricValueTypeInt, MetricNamePrefix: prefix, NumILMPerResource: size, NumMetricsPerILM: size, @@ -471,14 +472,14 @@ func pdm(prefix string, size int) pdata.Metrics { } func TestNilResourceMetrics(t *testing.T) { - metrics := pdata.NewMetrics() + metrics := pmetric.NewMetrics() rms := metrics.ResourceMetrics() rms.AppendEmpty() requireNotPanics(t, metrics) } func TestNilILM(t *testing.T) { - metrics := pdata.NewMetrics() + metrics := pmetric.NewMetrics() rms := metrics.ResourceMetrics() rm := rms.AppendEmpty() ilms := rm.ScopeMetrics() @@ -487,7 +488,7 @@ func TestNilILM(t *testing.T) { } func TestNilMetric(t *testing.T) { - metrics := pdata.NewMetrics() + metrics := pmetric.NewMetrics() rms := metrics.ResourceMetrics() rm := rms.AppendEmpty() ilms := rm.ScopeMetrics() @@ -497,7 +498,7 @@ func TestNilMetric(t *testing.T) { requireNotPanics(t, metrics) } -func requireNotPanics(t *testing.T, metrics pdata.Metrics) { +func requireNotPanics(t *testing.T, metrics pmetric.Metrics) { factory := NewFactory() cfg := factory.CreateDefaultConfig() pcfg := cfg.(*Config) diff --git a/processor/filterprocessor/go.mod b/processor/filterprocessor/go.mod index 9dbd93c37a36..e86836966dc6 100644 --- a/processor/filterprocessor/go.mod +++ b/processor/filterprocessor/go.mod @@ -5,8 +5,8 @@ go 1.17 require ( 
github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.48.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/zap v1.21.0 ) @@ -15,7 +15,7 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/text v0.2.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect @@ -24,6 +24,7 @@ require ( github.com/rogpeppe/go-internal v1.6.1 // indirect github.com/spf13/cast v1.4.1 // indirect go.opencensus.io v0.23.0 // indirect + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect @@ -34,3 +35,5 @@ require ( ) replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal => ../../internal/coreinternal + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/processor/filterprocessor/go.sum b/processor/filterprocessor/go.sum index d52328446b66..750ad79e4cac 100644 --- a/processor/filterprocessor/go.sum +++ b/processor/filterprocessor/go.sum @@ -18,7 +18,7 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= @@ -76,7 +76,6 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -106,8 +105,8 @@ github.com/joho/godotenv v1.3.0/go.mod 
h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqx github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -176,10 +175,12 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= @@ -219,7 +220,7 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -243,7 +244,7 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= diff --git a/processor/groupbyattrsprocessor/attribute_groups.go b/processor/groupbyattrsprocessor/attribute_groups.go index 85666cd65d01..4f33a833dc9a 100644 --- a/processor/groupbyattrsprocessor/attribute_groups.go +++ b/processor/groupbyattrsprocessor/attribute_groups.go @@ -15,16 +15,19 @@ package groupbyattrsprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor" import ( - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" ) -func instrumentationLibrariesEqual(il1, il2 pdata.InstrumentationScope) bool { +func instrumentationLibrariesEqual(il1, il2 pcommon.InstrumentationScope) bool { return il1.Name() == il2.Name() && il1.Version() == il2.Version() } -// matchingScopeSpans searches for a pdata.ScopeSpans instance matching +// matchingScopeSpans searches for a ptrace.ScopeSpans instance matching // given InstrumentationScope. If nothing is found, it creates a new one -func matchingScopeSpans(rl pdata.ResourceSpans, library pdata.InstrumentationScope) pdata.ScopeSpans { +func matchingScopeSpans(rl ptrace.ResourceSpans, library pcommon.InstrumentationScope) ptrace.ScopeSpans { ilss := rl.ScopeSpans() for i := 0; i < ilss.Len(); i++ { ils := ilss.At(i) @@ -38,9 +41,9 @@ func matchingScopeSpans(rl pdata.ResourceSpans, library pdata.InstrumentationSco return ils } -// matchingScopeLogs searches for a pdata.ScopeLogs instance matching +// matchingScopeLogs searches for a plog.ScopeLogs instance matching // given InstrumentationScope. If nothing is found, it creates a new one -func matchingScopeLogs(rl pdata.ResourceLogs, library pdata.InstrumentationScope) pdata.ScopeLogs { +func matchingScopeLogs(rl plog.ResourceLogs, library pcommon.InstrumentationScope) plog.ScopeLogs { ills := rl.ScopeLogs() for i := 0; i < ills.Len(); i++ { sl := ills.At(i) @@ -54,9 +57,9 @@ func matchingScopeLogs(rl pdata.ResourceLogs, library pdata.InstrumentationScope return sl } -// matchingScopeMetrics searches for a pdata.ScopeMetrics instance matching +// matchingScopeMetrics searches for a pmetric.ScopeMetrics instance matching // given InstrumentationScope. 
If nothing is found, it creates a new one -func matchingScopeMetrics(rm pdata.ResourceMetrics, library pdata.InstrumentationScope) pdata.ScopeMetrics { +func matchingScopeMetrics(rm pmetric.ResourceMetrics, library pcommon.InstrumentationScope) pmetric.ScopeMetrics { ilms := rm.ScopeMetrics() for i := 0; i < ilms.Len(); i++ { ilm := ilms.At(i) @@ -72,52 +75,52 @@ func matchingScopeMetrics(rm pdata.ResourceMetrics, library pdata.Instrumentatio // spansGroupedByAttrs keeps all found grouping attributes for spans, together with the matching records type spansGroupedByAttrs struct { - pdata.ResourceSpansSlice + ptrace.ResourceSpansSlice } // logsGroupedByAttrs keeps all found grouping attributes for logs, together with the matching records type logsGroupedByAttrs struct { - pdata.ResourceLogsSlice + plog.ResourceLogsSlice } // metricsGroupedByAttrs keeps all found grouping attributes for metrics, together with the matching records type metricsGroupedByAttrs struct { - pdata.ResourceMetricsSlice + pmetric.ResourceMetricsSlice } func newLogsGroupedByAttrs() *logsGroupedByAttrs { return &logsGroupedByAttrs{ - ResourceLogsSlice: pdata.NewResourceLogsSlice(), + ResourceLogsSlice: plog.NewResourceLogsSlice(), } } func newSpansGroupedByAttrs() *spansGroupedByAttrs { return &spansGroupedByAttrs{ - ResourceSpansSlice: pdata.NewResourceSpansSlice(), + ResourceSpansSlice: ptrace.NewResourceSpansSlice(), } } func newMetricsGroupedByAttrs() *metricsGroupedByAttrs { return &metricsGroupedByAttrs{ - ResourceMetricsSlice: pdata.NewResourceMetricsSlice(), + ResourceMetricsSlice: pmetric.NewResourceMetricsSlice(), } } // Build the Attributes that we'll be looking for in existing Resources as a merge of the Attributes // of the original Resource with the requested Attributes -func buildReferenceAttributes(originResource pdata.Resource, requiredAttributes pdata.Map) pdata.Map { - referenceAttributes := pdata.NewMap() +func buildReferenceAttributes(originResource pcommon.Resource, requiredAttributes pcommon.Map) pcommon.Map { + referenceAttributes := pcommon.NewMap() originResource.Attributes().CopyTo(referenceAttributes) - requiredAttributes.Range(func(k string, v pdata.Value) bool { + requiredAttributes.Range(func(k string, v pcommon.Value) bool { referenceAttributes.Upsert(k, v) return true }) return referenceAttributes } -// resourceMatches verifies if given pdata.Resource attributes strictly match with the specified +// resourceMatches verifies if given pcommon.Resource attributes strictly match with the specified // reference Attributes (all attributes must match strictly) -func resourceMatches(resource pdata.Resource, referenceAttributes pdata.Map) bool { +func resourceMatches(resource pcommon.Resource, referenceAttributes pcommon.Map) bool { // If not the same number of attributes, it doesn't match if referenceAttributes.Len() != resource.Attributes().Len() { @@ -126,7 +129,7 @@ func resourceMatches(resource pdata.Resource, referenceAttributes pdata.Map) boo // Go through each attribute and check the corresponding attribute value in the tested Resource matching := true - referenceAttributes.Range(func(referenceKey string, referenceValue pdata.Value) bool { + referenceAttributes.Range(func(referenceKey string, referenceValue pcommon.Value) bool { testedValue, foundKey := resource.Attributes().Get(referenceKey) if !foundKey || !referenceValue.Equal(testedValue) { // One difference is enough to consider it doesn't match, so fail early @@ -139,49 +142,49 @@ func resourceMatches(resource pdata.Resource, 
referenceAttributes pdata.Map) boo return matching } -// findResource searches for an existing pdata.ResourceLogs that strictly matches with the specified reference -// Attributes. Returns the matching pdata.ResourceLogs and bool value which is set to true if found -func (lgba logsGroupedByAttrs) findResource(referenceAttributes pdata.Map) (pdata.ResourceLogs, bool) { +// findResource searches for an existing plog.ResourceLogs that strictly matches with the specified reference +// Attributes. Returns the matching plog.ResourceLogs and bool value which is set to true if found +func (lgba logsGroupedByAttrs) findResource(referenceAttributes pcommon.Map) (plog.ResourceLogs, bool) { for i := 0; i < lgba.Len(); i++ { if resourceMatches(lgba.At(i).Resource(), referenceAttributes) { return lgba.At(i), true } } - return pdata.ResourceLogs{}, false + return plog.ResourceLogs{}, false } -// findResource searches for an existing pdata.ResourceLogs that strictly matches with the specified reference -// Attributes. Returns the matching pdata.ResourceLogs and bool value which is set to true if found -func (sgba spansGroupedByAttrs) findResource(referenceAttributes pdata.Map) (pdata.ResourceSpans, bool) { +// findResource searches for an existing plog.ResourceLogs that strictly matches with the specified reference +// Attributes. Returns the matching plog.ResourceLogs and bool value which is set to true if found +func (sgba spansGroupedByAttrs) findResource(referenceAttributes pcommon.Map) (ptrace.ResourceSpans, bool) { for i := 0; i < sgba.Len(); i++ { if resourceMatches(sgba.At(i).Resource(), referenceAttributes) { return sgba.At(i), true } } - return pdata.ResourceSpans{}, false + return ptrace.ResourceSpans{}, false } -// findResource searches for an existing pdata.ResourceMetrics that strictly matches with the specified reference -// Attributes. Returns the matching pdata.ResourceMetrics and bool value which is set to true if found -func (mgba metricsGroupedByAttrs) findResource(referenceAttributes pdata.Map) (pdata.ResourceMetrics, bool) { +// findResource searches for an existing pmetric.ResourceMetrics that strictly matches with the specified reference +// Attributes. Returns the matching pmetric.ResourceMetrics and bool value which is set to true if found +func (mgba metricsGroupedByAttrs) findResource(referenceAttributes pcommon.Map) (pmetric.ResourceMetrics, bool) { for i := 0; i < mgba.Len(); i++ { if resourceMatches(mgba.At(i).Resource(), referenceAttributes) { return mgba.At(i), true } } - return pdata.ResourceMetrics{}, false + return pmetric.ResourceMetrics{}, false } // Update the specified (and new) Resource with the properties of the original Resource, and with the // required Attributes -func updateResourceToMatch(newResource pdata.Resource, originResource pdata.Resource, requiredAttributes pdata.Map) { +func updateResourceToMatch(newResource pcommon.Resource, originResource pcommon.Resource, requiredAttributes pcommon.Map) { originResource.CopyTo(newResource) // This prioritizes required attributes over the original resource attributes, if they overlap attrs := newResource.Attributes() - requiredAttributes.Range(func(k string, v pdata.Value) bool { + requiredAttributes.Range(func(k string, v pcommon.Value) bool { attrs.Upsert(k, v) return true }) @@ -189,7 +192,7 @@ func updateResourceToMatch(newResource pdata.Resource, originResource pdata.Reso } // findOrCreateResource searches for a Resource with matching attributes and returns it. 
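As a reading aid for the grouping helpers above: the strict-match test itself only needs pcommon types, regardless of which signal slice (ptrace, plog, pmetric) the group lives in. The following is a small sketch of that comparison under the v0.48-era pcommon API used in the diff (Range, Get, Value.Equal); attributesStrictlyMatch is an illustrative stand-in for the resourceMatches helper, and the attribute key/value in main are made up.

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pcommon"
)

// attributesStrictlyMatch mirrors the resourceMatches logic above: attribute
// counts must be identical, and every reference key must exist on the
// resource with an equal value.
func attributesStrictlyMatch(resource pcommon.Resource, reference pcommon.Map) bool {
	if reference.Len() != resource.Attributes().Len() {
		return false
	}
	matching := true
	reference.Range(func(k string, v pcommon.Value) bool {
		got, ok := resource.Attributes().Get(k)
		if !ok || !v.Equal(got) {
			matching = false
			return false // one mismatch is enough, stop early
		}
		return true
	})
	return matching
}

func main() {
	res := pcommon.NewResource()
	res.Attributes().InsertString("host.name", "demo")

	ref := pcommon.NewMap()
	ref.InsertString("host.name", "demo")

	fmt.Println(attributesStrictlyMatch(res, ref)) // true
}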
If nothing is found, it is being created -func (sgba *spansGroupedByAttrs) findOrCreateResource(originResource pdata.Resource, requiredAttributes pdata.Map) pdata.ResourceSpans { +func (sgba *spansGroupedByAttrs) findOrCreateResource(originResource pcommon.Resource, requiredAttributes pcommon.Map) ptrace.ResourceSpans { // Build the reference attributes that we're looking for in Resources referenceAttributes := buildReferenceAttributes(originResource, requiredAttributes) @@ -208,7 +211,7 @@ func (sgba *spansGroupedByAttrs) findOrCreateResource(originResource pdata.Resou } // findResourceOrElseCreate searches for a Resource with matching attributes and returns it. If nothing is found, it is being created -func (lgba *logsGroupedByAttrs) findResourceOrElseCreate(originResource pdata.Resource, requiredAttributes pdata.Map) pdata.ResourceLogs { +func (lgba *logsGroupedByAttrs) findResourceOrElseCreate(originResource pcommon.Resource, requiredAttributes pcommon.Map) plog.ResourceLogs { // Build the reference attributes that we're looking for in Resources referenceAttributes := buildReferenceAttributes(originResource, requiredAttributes) @@ -227,7 +230,7 @@ func (lgba *logsGroupedByAttrs) findResourceOrElseCreate(originResource pdata.Re } // findResourceOrElseCreate searches for a Resource with matching attributes and returns it. If nothing is found, it is being created -func (mgba *metricsGroupedByAttrs) findResourceOrElseCreate(originResource pdata.Resource, requiredAttributes pdata.Map) pdata.ResourceMetrics { +func (mgba *metricsGroupedByAttrs) findResourceOrElseCreate(originResource pcommon.Resource, requiredAttributes pcommon.Map) pmetric.ResourceMetrics { // Build the reference attributes that we're looking for in Resources referenceAttributes := buildReferenceAttributes(originResource, requiredAttributes) diff --git a/processor/groupbyattrsprocessor/attribute_groups_test.go b/processor/groupbyattrsprocessor/attribute_groups_test.go index 73c364ff35c5..ca095758f653 100644 --- a/processor/groupbyattrsprocessor/attribute_groups_test.go +++ b/processor/groupbyattrsprocessor/attribute_groups_test.go @@ -20,23 +20,26 @@ import ( "testing" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" ) -func simpleResource() pdata.Resource { - rs := pdata.NewResource() - rs.Attributes().Insert("somekey1", pdata.NewValueString("some-string-value")) - rs.Attributes().Insert("somekey2", pdata.NewValueInt(123)) +func simpleResource() pcommon.Resource { + rs := pcommon.NewResource() + rs.Attributes().Insert("somekey1", pcommon.NewValueString("some-string-value")) + rs.Attributes().Insert("somekey2", pcommon.NewValueInt(123)) for i := 0; i < 10; i++ { k := fmt.Sprint("random-", i) v := fmt.Sprint("value-", rand.Intn(100)) - rs.Attributes().Insert(k, pdata.NewValueString(v)) + rs.Attributes().Insert(k, pcommon.NewValueString(v)) } return rs } -func randomAttributeMap() pdata.Map { - attrs := pdata.NewMap() +func randomAttributeMap() pcommon.Map { + attrs := pcommon.NewMap() for i := 0; i < 10; i++ { k := fmt.Sprint("key-", i) v := fmt.Sprint("value-", rand.Intn(500000)) @@ -45,8 +48,8 @@ func randomAttributeMap() pdata.Map { return attrs } -func randomGroups(count int) []pdata.Map { - entries := make([]pdata.Map, count) +func randomGroups(count int) []pcommon.Map { + entries := 
make([]pcommon.Map, count) for i := 0; i < count; i++ { entries[i] = randomAttributeMap() } @@ -63,34 +66,34 @@ var ( func TestResourceAttributeScenarios(t *testing.T) { tests := []struct { name string - baseResource pdata.Resource - fillRecordAttributesFun func(attributeMap pdata.Map) - fillExpectedResourceFun func(baseResource pdata.Resource, expectedResource pdata.Resource) + baseResource pcommon.Resource + fillRecordAttributesFun func(attributeMap pcommon.Map) + fillExpectedResourceFun func(baseResource pcommon.Resource, expectedResource pcommon.Resource) }{ { name: "When the same key is present at Resource and Record level, the latter value should be used", baseResource: simpleResource(), - fillRecordAttributesFun: func(attributeMap pdata.Map) { + fillRecordAttributesFun: func(attributeMap pcommon.Map) { attributeMap.InsertString("somekey1", "replaced-value") }, - fillExpectedResourceFun: func(baseResource pdata.Resource, expectedResource pdata.Resource) { + fillExpectedResourceFun: func(baseResource pcommon.Resource, expectedResource pcommon.Resource) { baseResource.CopyTo(expectedResource) expectedResource.Attributes().UpdateString("somekey1", "replaced-value") }, }, { name: "Empty Resource and attributes", - baseResource: pdata.NewResource(), + baseResource: pcommon.NewResource(), fillRecordAttributesFun: nil, fillExpectedResourceFun: nil, }, { name: "Empty Resource", - baseResource: pdata.NewResource(), - fillRecordAttributesFun: func(attributeMap pdata.Map) { + baseResource: pcommon.NewResource(), + fillRecordAttributesFun: func(attributeMap pcommon.Map) { attributeMap.InsertString("somekey1", "some-value") }, - fillExpectedResourceFun: func(_ pdata.Resource, expectedResource pdata.Resource) { + fillExpectedResourceFun: func(_ pcommon.Resource, expectedResource pcommon.Resource) { expectedResource.Attributes().InsertString("somekey1", "some-value") }, }, @@ -98,7 +101,7 @@ func TestResourceAttributeScenarios(t *testing.T) { name: "Empty Attributes", baseResource: simpleResource(), fillRecordAttributesFun: nil, - fillExpectedResourceFun: func(baseResource pdata.Resource, expectedResource pdata.Resource) { + fillExpectedResourceFun: func(baseResource pcommon.Resource, expectedResource pcommon.Resource) { baseResource.CopyTo(expectedResource) }, }, @@ -106,12 +109,12 @@ func TestResourceAttributeScenarios(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - recordAttributeMap := pdata.NewMap() + recordAttributeMap := pcommon.NewMap() if tt.fillRecordAttributesFun != nil { tt.fillRecordAttributesFun(recordAttributeMap) } - expectedResource := pdata.NewResource() + expectedResource := pcommon.NewResource() if tt.fillExpectedResourceFun != nil { tt.fillExpectedResourceFun(tt.baseResource, expectedResource) } @@ -123,13 +126,13 @@ func TestResourceAttributeScenarios(t *testing.T) { } func TestInstrumentationLibraryMatching(t *testing.T) { - rl := pdata.NewResourceLogs() - rs := pdata.NewResourceSpans() - rm := pdata.NewResourceMetrics() + rl := plog.NewResourceLogs() + rs := ptrace.NewResourceSpans() + rm := pmetric.NewResourceMetrics() - il1 := pdata.NewInstrumentationScope() + il1 := pcommon.NewInstrumentationScope() il1.SetName("Name1") - il2 := pdata.NewInstrumentationScope() + il2 := pcommon.NewInstrumentationScope() il2.SetName("Name2") ill1 := matchingScopeLogs(rl, il1) diff --git a/processor/groupbyattrsprocessor/go.mod b/processor/groupbyattrsprocessor/go.mod index 524e52257be8..e663069be893 100644 --- a/processor/groupbyattrsprocessor/go.mod +++ 
b/processor/groupbyattrsprocessor/go.mod @@ -6,16 +6,15 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbytraceprocessor v0.48.0 github.com/stretchr/testify v1.7.1 go.opencensus.io v0.23.0 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/zap v1.21.0 ) require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/protobuf v1.5.2 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/text v0.2.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect @@ -23,18 +22,11 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.48.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rogpeppe/go-internal v1.6.1 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect - golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect - golang.org/x/text v0.3.7 // indirect - google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect - google.golang.org/grpc v1.45.0 // indirect - google.golang.org/protobuf v1.28.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) @@ -42,3 +34,5 @@ require ( replace github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbytraceprocessor => ../../processor/groupbytraceprocessor replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal => ../../pkg/batchpersignal + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/processor/groupbyattrsprocessor/go.sum b/processor/groupbyattrsprocessor/go.sum index 74701575c476..a9177b675ef0 100644 --- a/processor/groupbyattrsprocessor/go.sum +++ b/processor/groupbyattrsprocessor/go.sum @@ -1,8 +1,5 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= @@ -18,18 +15,10 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod 
h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -37,16 +26,12 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/felixge/httpsnoop v1.0.2 h1:+nS9g82KMXccJ/wp0zyRW9ZBHFETmMGtkk+2CTTrW4o= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= 
github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= @@ -63,18 +48,14 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -84,13 +65,10 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod 
h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -120,8 +98,8 @@ github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqx github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -159,21 +137,16 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rhnvrm/simples3 v0.6.1/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= @@ -183,20 +156,19 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= 
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -220,20 +192,16 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod 
h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -249,21 +217,17 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -284,22 +248,16 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= 
google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -309,19 +267,13 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/yaml.v2 
v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= diff --git a/processor/groupbyattrsprocessor/processor.go b/processor/groupbyattrsprocessor/processor.go index d5ab2beb9ce8..c819a7c661a4 100644 --- a/processor/groupbyattrsprocessor/processor.go +++ b/processor/groupbyattrsprocessor/processor.go @@ -18,7 +18,10 @@ import ( "context" "go.opencensus.io/stats" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap" ) @@ -28,7 +31,7 @@ type groupByAttrsProcessor struct { } // ProcessTraces process traces and groups traces by attribute. -func (gap *groupByAttrsProcessor) processTraces(ctx context.Context, td pdata.Traces) (pdata.Traces, error) { +func (gap *groupByAttrsProcessor) processTraces(ctx context.Context, td ptrace.Traces) (ptrace.Traces, error) { rss := td.ResourceSpans() groupedResourceSpans := newSpansGroupedByAttrs() @@ -61,14 +64,14 @@ func (gap *groupByAttrsProcessor) processTraces(ctx context.Context, td pdata.Tr } // Copy the grouped data into output - groupedTraces := pdata.NewTraces() + groupedTraces := ptrace.NewTraces() groupedResourceSpans.MoveAndAppendTo(groupedTraces.ResourceSpans()) stats.Record(ctx, mDistSpanGroups.M(int64(groupedTraces.ResourceSpans().Len()))) return groupedTraces, nil } -func (gap *groupByAttrsProcessor) processLogs(ctx context.Context, ld pdata.Logs) (pdata.Logs, error) { +func (gap *groupByAttrsProcessor) processLogs(ctx context.Context, ld plog.Logs) (plog.Logs, error) { rl := ld.ResourceLogs() groupedResourceLogs := newLogsGroupedByAttrs() @@ -102,14 +105,14 @@ func (gap *groupByAttrsProcessor) processLogs(ctx context.Context, ld pdata.Logs } // Copy the grouped data into output - groupedLogs := pdata.NewLogs() + groupedLogs := plog.NewLogs() groupedResourceLogs.MoveAndAppendTo(groupedLogs.ResourceLogs()) stats.Record(ctx, mDistLogGroups.M(int64(groupedLogs.ResourceLogs().Len()))) return groupedLogs, nil } -func (gap *groupByAttrsProcessor) processMetrics(ctx context.Context, md pdata.Metrics) (pdata.Metrics, error) { +func (gap *groupByAttrsProcessor) processMetrics(ctx context.Context, md pmetric.Metrics) (pmetric.Metrics, error) { rms := md.ResourceMetrics() groupedResourceMetrics := newMetricsGroupedByAttrs() @@ -124,35 +127,35 @@ func (gap *groupByAttrsProcessor) processMetrics(ctx context.Context, md pdata.M switch metric.DataType() { - case pdata.MetricDataTypeGauge: + case pmetric.MetricDataTypeGauge: for pointIndex := 0; pointIndex < metric.Gauge().DataPoints().Len(); pointIndex++ { dataPoint := metric.Gauge().DataPoints().At(pointIndex) groupedMetric := gap.getGroupedMetricsFromAttributes(ctx, groupedResourceMetrics, rm, ilm, metric, dataPoint.Attributes()) dataPoint.CopyTo(groupedMetric.Gauge().DataPoints().AppendEmpty()) } - case pdata.MetricDataTypeSum: + case pmetric.MetricDataTypeSum: for pointIndex := 0; pointIndex < metric.Sum().DataPoints().Len(); pointIndex++ { dataPoint := metric.Sum().DataPoints().At(pointIndex) groupedMetric := gap.getGroupedMetricsFromAttributes(ctx, groupedResourceMetrics, rm, ilm, metric, 
dataPoint.Attributes()) dataPoint.CopyTo(groupedMetric.Sum().DataPoints().AppendEmpty()) } - case pdata.MetricDataTypeSummary: + case pmetric.MetricDataTypeSummary: for pointIndex := 0; pointIndex < metric.Summary().DataPoints().Len(); pointIndex++ { dataPoint := metric.Summary().DataPoints().At(pointIndex) groupedMetric := gap.getGroupedMetricsFromAttributes(ctx, groupedResourceMetrics, rm, ilm, metric, dataPoint.Attributes()) dataPoint.CopyTo(groupedMetric.Summary().DataPoints().AppendEmpty()) } - case pdata.MetricDataTypeHistogram: + case pmetric.MetricDataTypeHistogram: for pointIndex := 0; pointIndex < metric.Histogram().DataPoints().Len(); pointIndex++ { dataPoint := metric.Histogram().DataPoints().At(pointIndex) groupedMetric := gap.getGroupedMetricsFromAttributes(ctx, groupedResourceMetrics, rm, ilm, metric, dataPoint.Attributes()) dataPoint.CopyTo(groupedMetric.Histogram().DataPoints().AppendEmpty()) } - case pdata.MetricDataTypeExponentialHistogram: + case pmetric.MetricDataTypeExponentialHistogram: for pointIndex := 0; pointIndex < metric.ExponentialHistogram().DataPoints().Len(); pointIndex++ { dataPoint := metric.ExponentialHistogram().DataPoints().At(pointIndex) groupedMetric := gap.getGroupedMetricsFromAttributes(ctx, groupedResourceMetrics, rm, ilm, metric, dataPoint.Attributes()) @@ -165,15 +168,15 @@ func (gap *groupByAttrsProcessor) processMetrics(ctx context.Context, md pdata.M } // Copy the grouped data into output - groupedMetrics := pdata.NewMetrics() + groupedMetrics := pmetric.NewMetrics() groupedResourceMetrics.MoveAndAppendTo(groupedMetrics.ResourceMetrics()) stats.Record(ctx, mDistMetricGroups.M(int64(groupedMetrics.ResourceMetrics().Len()))) return groupedMetrics, nil } -func deleteAttributes(attrsForRemoval, targetAttrs pdata.Map) { - attrsForRemoval.Range(func(key string, _ pdata.Value) bool { +func deleteAttributes(attrsForRemoval, targetAttrs pcommon.Map) { + attrsForRemoval.Range(func(key string, _ pcommon.Value) bool { targetAttrs.Delete(key) return true }) @@ -184,9 +187,9 @@ func deleteAttributes(attrsForRemoval, targetAttrs pdata.Map) { // Returns: // - whether any attribute matched (true) or none (false) // - the extracted AttributeMap of matching keys and their corresponding values -func (gap *groupByAttrsProcessor) extractGroupingAttributes(attrMap pdata.Map) (bool, pdata.Map) { +func (gap *groupByAttrsProcessor) extractGroupingAttributes(attrMap pcommon.Map) (bool, pcommon.Map) { - groupingAttributes := pdata.NewMap() + groupingAttributes := pcommon.NewMap() foundMatch := false for _, attrKey := range gap.groupByKeys { @@ -201,7 +204,7 @@ func (gap *groupByAttrsProcessor) extractGroupingAttributes(attrMap pdata.Map) ( } // Searches for metric with same name in the specified InstrumentationLibrary and returns it. If nothing is found, create it. 
-func getMetricInInstrumentationLibrary(ilm pdata.ScopeMetrics, searchedMetric pdata.Metric) pdata.Metric { +func getMetricInInstrumentationLibrary(ilm pmetric.ScopeMetrics, searchedMetric pmetric.Metric) pmetric.Metric { // Loop through all metrics and try to find the one that matches with the one we search for // (name and type) @@ -226,11 +229,11 @@ func getMetricInInstrumentationLibrary(ilm pdata.ScopeMetrics, searchedMetric pd func (gap *groupByAttrsProcessor) getGroupedMetricsFromAttributes( ctx context.Context, groupedResourceMetrics *metricsGroupedByAttrs, - originResourceMetrics pdata.ResourceMetrics, - ilm pdata.ScopeMetrics, - metric pdata.Metric, - attributes pdata.Map, -) pdata.Metric { + originResourceMetrics pmetric.ResourceMetrics, + ilm pmetric.ScopeMetrics, + metric pmetric.Metric, + attributes pcommon.Map, +) pmetric.Metric { toBeGrouped, requiredAttributes := gap.extractGroupingAttributes(attributes) if toBeGrouped { diff --git a/processor/groupbyattrsprocessor/processor_test.go b/processor/groupbyattrsprocessor/processor_test.go index a8178b76433f..ad8f5f82a6bf 100644 --- a/processor/groupbyattrsprocessor/processor_test.go +++ b/processor/groupbyattrsprocessor/processor_test.go @@ -21,7 +21,10 @@ import ( "time" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap" ) @@ -29,21 +32,21 @@ var ( attrMap = prepareAttributeMap() ) -func prepareAttributeMap() pdata.Map { +func prepareAttributeMap() pcommon.Map { attributeValues := map[string]interface{}{ "xx": "aa", "yy": 11, } - am := pdata.NewMap() - pdata.NewMapFromRaw(attributeValues).CopyTo(am) + am := pcommon.NewMap() + pcommon.NewMapFromRaw(attributeValues).CopyTo(am) am.Sort() return am } -func prepareResource(attrMap pdata.Map, selectedKeys []string) pdata.Resource { - res := pdata.NewResource() +func prepareResource(attrMap pcommon.Map, selectedKeys []string) pcommon.Resource { + res := pcommon.NewResource() for _, key := range selectedKeys { val, found := attrMap.Get(key) if found { @@ -54,8 +57,8 @@ func prepareResource(attrMap pdata.Map, selectedKeys []string) pdata.Resource { return res } -func filterAttributeMap(attrMap pdata.Map, selectedKeys []string) pdata.Map { - filteredAttrMap := pdata.NewMap() +func filterAttributeMap(attrMap pcommon.Map, selectedKeys []string) pcommon.Map { + filteredAttrMap := pcommon.NewMap() if len(selectedKeys) == 0 { return filteredAttrMap } @@ -69,8 +72,8 @@ func filterAttributeMap(attrMap pdata.Map, selectedKeys []string) pdata.Map { return filteredAttrMap } -func someComplexLogs(withResourceAttrIndex bool, rlCount int, illCount int) pdata.Logs { - logs := pdata.NewLogs() +func someComplexLogs(withResourceAttrIndex bool, rlCount int, illCount int) plog.Logs { + logs := plog.NewLogs() for i := 0; i < rlCount; i++ { rl := logs.ResourceLogs().AppendEmpty() @@ -89,8 +92,8 @@ func someComplexLogs(withResourceAttrIndex bool, rlCount int, illCount int) pdat return logs } -func someComplexTraces(withResourceAttrIndex bool, rsCount int, ilsCount int) pdata.Traces { - traces := pdata.NewTraces() +func someComplexTraces(withResourceAttrIndex bool, rsCount int, ilsCount int) ptrace.Traces { + traces := ptrace.NewTraces() for i := 0; i < rsCount; i++ { rs := traces.ResourceSpans().AppendEmpty() @@ -109,8 +112,8 @@ func 
someComplexTraces(withResourceAttrIndex bool, rsCount int, ilsCount int) pd return traces } -func someComplexMetrics(withResourceAttrIndex bool, rmCount int, ilmCount int, dataPointCount int) pdata.Metrics { - metrics := pdata.NewMetrics() +func someComplexMetrics(withResourceAttrIndex bool, rmCount int, ilmCount int, dataPointCount int) pmetric.Metrics { + metrics := pmetric.NewMetrics() for i := 0; i < rmCount; i++ { rm := metrics.ResourceMetrics().AppendEmpty() @@ -121,11 +124,11 @@ func someComplexMetrics(withResourceAttrIndex bool, rmCount int, ilmCount int, d for j := 0; j < ilmCount; j++ { metric := rm.ScopeMetrics().AppendEmpty().Metrics().AppendEmpty() metric.SetName(fmt.Sprintf("foo-%d-%d", i, j)) - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) for k := 0; k < dataPointCount; k++ { dataPoint := metric.Gauge().DataPoints().AppendEmpty() - dataPoint.SetTimestamp(pdata.NewTimestampFromTime(time.Now())) + dataPoint.SetTimestamp(pcommon.NewTimestampFromTime(time.Now())) dataPoint.SetIntVal(int64(k)) dataPoint.Attributes().InsertString("commonGroupedAttr", "abc") dataPoint.Attributes().InsertString("commonNonGroupedAttr", "xyz") @@ -136,8 +139,8 @@ func someComplexMetrics(withResourceAttrIndex bool, rmCount int, ilmCount int, d return metrics } -func assertResourceContainsAttributes(t *testing.T, resource pdata.Resource, attributeMap pdata.Map) { - attributeMap.Range(func(k string, v pdata.Value) bool { +func assertResourceContainsAttributes(t *testing.T, resource pcommon.Resource, attributeMap pcommon.Map) { + attributeMap.Range(func(k string, v pcommon.Value) bool { rv, found := resource.Attributes().Get(k) assert.True(t, found) assert.Equal(t, v, rv) @@ -240,8 +243,8 @@ func TestComplexAttributeGrouping(t *testing.T) { assert.NoError(t, err) // Following are record-level attributes that should be preserved after processing - outputRecordAttrs := pdata.NewMap() - outputResourceAttrs := pdata.NewMap() + outputRecordAttrs := pcommon.NewMap() + outputResourceAttrs := pcommon.NewMap() if tt.shouldMoveCommonGroupedAttr { // This was present at record level and should be found on Resource level after the processor outputResourceAttrs.InsertString("commonGroupedAttr", "abc") @@ -385,7 +388,7 @@ func TestAttributeGrouping(t *testing.T) { assert.Equal(t, 1, processedHistogramMetrics.ResourceMetrics().Len()) assert.Equal(t, 1, processedExponentialHistogramMetrics.ResourceMetrics().Len()) - resources := []pdata.Resource{ + resources := []pcommon.Resource{ processedLogs.ResourceLogs().At(0).Resource(), processedSpans.ResourceSpans().At(0).Resource(), processedGaugeMetrics.ResourceMetrics().At(0).Resource(), @@ -460,8 +463,8 @@ func TestAttributeGrouping(t *testing.T) { } } -func someSpans(attrs pdata.Map, instrumentationLibraryCount int, spanCount int) pdata.Traces { - traces := pdata.NewTraces() +func someSpans(attrs pcommon.Map, instrumentationLibraryCount int, spanCount int) ptrace.Traces { + traces := ptrace.NewTraces() for i := 0; i < instrumentationLibraryCount; i++ { ilName := fmt.Sprint("ils-", i) @@ -476,8 +479,8 @@ func someSpans(attrs pdata.Map, instrumentationLibraryCount int, spanCount int) return traces } -func someLogs(attrs pdata.Map, instrumentationLibraryCount int, logCount int) pdata.Logs { - logs := pdata.NewLogs() +func someLogs(attrs pcommon.Map, instrumentationLibraryCount int, logCount int) plog.Logs { + logs := plog.NewLogs() for i := 0; i < instrumentationLibraryCount; i++ { ilName := fmt.Sprint("ils-", i) @@ -492,8 
+495,8 @@ func someLogs(attrs pdata.Map, instrumentationLibraryCount int, logCount int) pd return logs } -func someGaugeMetrics(attrs pdata.Map, instrumentationLibraryCount int, metricCount int) pdata.Metrics { - metrics := pdata.NewMetrics() +func someGaugeMetrics(attrs pcommon.Map, instrumentationLibraryCount int, metricCount int) pmetric.Metrics { + metrics := pmetric.NewMetrics() for i := 0; i < instrumentationLibraryCount; i++ { ilName := fmt.Sprint("ils-", i) @@ -502,7 +505,7 @@ func someGaugeMetrics(attrs pdata.Map, instrumentationLibraryCount int, metricCo ilm.Scope().SetName(ilName) metric := ilm.Metrics().AppendEmpty() metric.SetName(fmt.Sprint("gauge-", j)) - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) dataPoint := metric.Gauge().DataPoints().AppendEmpty() attrs.CopyTo(dataPoint.Attributes()) } @@ -510,8 +513,8 @@ func someGaugeMetrics(attrs pdata.Map, instrumentationLibraryCount int, metricCo return metrics } -func someSumMetrics(attrs pdata.Map, instrumentationLibraryCount int, metricCount int) pdata.Metrics { - metrics := pdata.NewMetrics() +func someSumMetrics(attrs pcommon.Map, instrumentationLibraryCount int, metricCount int) pmetric.Metrics { + metrics := pmetric.NewMetrics() for i := 0; i < instrumentationLibraryCount; i++ { ilName := fmt.Sprint("ils-", i) @@ -520,7 +523,7 @@ func someSumMetrics(attrs pdata.Map, instrumentationLibraryCount int, metricCoun ilm.Scope().SetName(ilName) metric := ilm.Metrics().AppendEmpty() metric.SetName(fmt.Sprint("sum-", j)) - metric.SetDataType(pdata.MetricDataTypeSum) + metric.SetDataType(pmetric.MetricDataTypeSum) dataPoint := metric.Sum().DataPoints().AppendEmpty() attrs.CopyTo(dataPoint.Attributes()) } @@ -528,8 +531,8 @@ func someSumMetrics(attrs pdata.Map, instrumentationLibraryCount int, metricCoun return metrics } -func someSummaryMetrics(attrs pdata.Map, instrumentationLibraryCount int, metricCount int) pdata.Metrics { - metrics := pdata.NewMetrics() +func someSummaryMetrics(attrs pcommon.Map, instrumentationLibraryCount int, metricCount int) pmetric.Metrics { + metrics := pmetric.NewMetrics() for i := 0; i < instrumentationLibraryCount; i++ { ilName := fmt.Sprint("ils-", i) @@ -538,7 +541,7 @@ func someSummaryMetrics(attrs pdata.Map, instrumentationLibraryCount int, metric ilm.Scope().SetName(ilName) metric := ilm.Metrics().AppendEmpty() metric.SetName(fmt.Sprint("summary-", j)) - metric.SetDataType(pdata.MetricDataTypeSummary) + metric.SetDataType(pmetric.MetricDataTypeSummary) dataPoint := metric.Summary().DataPoints().AppendEmpty() attrs.CopyTo(dataPoint.Attributes()) } @@ -546,8 +549,8 @@ func someSummaryMetrics(attrs pdata.Map, instrumentationLibraryCount int, metric return metrics } -func someHistogramMetrics(attrs pdata.Map, instrumentationLibraryCount int, metricCount int) pdata.Metrics { - metrics := pdata.NewMetrics() +func someHistogramMetrics(attrs pcommon.Map, instrumentationLibraryCount int, metricCount int) pmetric.Metrics { + metrics := pmetric.NewMetrics() for i := 0; i < instrumentationLibraryCount; i++ { ilName := fmt.Sprint("ils-", i) @@ -556,7 +559,7 @@ func someHistogramMetrics(attrs pdata.Map, instrumentationLibraryCount int, metr ilm.Scope().SetName(ilName) metric := ilm.Metrics().AppendEmpty() metric.SetName(fmt.Sprint("histogram-", j)) - metric.SetDataType(pdata.MetricDataTypeHistogram) + metric.SetDataType(pmetric.MetricDataTypeHistogram) dataPoint := metric.Histogram().DataPoints().AppendEmpty() attrs.CopyTo(dataPoint.Attributes()) } @@ -564,8 
+567,8 @@ func someHistogramMetrics(attrs pdata.Map, instrumentationLibraryCount int, metr return metrics } -func someExponentialHistogramMetrics(attrs pdata.Map, instrumentationLibraryCount int, metricCount int) pdata.Metrics { - metrics := pdata.NewMetrics() +func someExponentialHistogramMetrics(attrs pcommon.Map, instrumentationLibraryCount int, metricCount int) pmetric.Metrics { + metrics := pmetric.NewMetrics() for i := 0; i < instrumentationLibraryCount; i++ { ilName := fmt.Sprint("ils-", i) @@ -574,7 +577,7 @@ func someExponentialHistogramMetrics(attrs pdata.Map, instrumentationLibraryCoun ilm.Scope().SetName(ilName) metric := ilm.Metrics().AppendEmpty() metric.SetName(fmt.Sprint("exponential-histogram-", j)) - metric.SetDataType(pdata.MetricDataTypeExponentialHistogram) + metric.SetDataType(pmetric.MetricDataTypeExponentialHistogram) dataPoint := metric.ExponentialHistogram().DataPoints().AppendEmpty() attrs.CopyTo(dataPoint.Attributes()) } @@ -633,7 +636,7 @@ func TestMetricAdvancedGrouping(t *testing.T) { // Metric "mixed-type" (GAUGE) // DataPoint {id="eth0"} - metrics := pdata.NewMetrics() + metrics := pmetric.NewMetrics() resourceMetrics := metrics.ResourceMetrics().AppendEmpty() resourceMetrics.Resource().Attributes().UpsertString("host.name", "localhost") @@ -642,7 +645,7 @@ func TestMetricAdvancedGrouping(t *testing.T) { // gauge-1 gauge1 := ilm.Metrics().AppendEmpty() gauge1.SetName("gauge-1") - gauge1.SetDataType(pdata.MetricDataTypeGauge) + gauge1.SetDataType(pmetric.MetricDataTypeGauge) datapoint := gauge1.Gauge().DataPoints().AppendEmpty() datapoint.Attributes().UpsertString("host.name", "host-A") datapoint.Attributes().UpsertString("id", "eth0") @@ -664,7 +667,7 @@ func TestMetricAdvancedGrouping(t *testing.T) { // mixed-type (same name but different TYPE) mixedType2 := ilm.Metrics().AppendEmpty() mixedType2.SetName("mixed-type") - mixedType2.SetDataType(pdata.MetricDataTypeSum) + mixedType2.SetDataType(pmetric.MetricDataTypeSum) datapoint = mixedType2.Sum().DataPoints().AppendEmpty() datapoint.Attributes().UpsertString("host.name", "host-A") datapoint.Attributes().UpsertString("id", "eth0") @@ -675,7 +678,7 @@ func TestMetricAdvancedGrouping(t *testing.T) { // dontmove (metric that will not move to another resource) dontmove := ilm.Metrics().AppendEmpty() dontmove.SetName("dont-move") - dontmove.SetDataType(pdata.MetricDataTypeGauge) + dontmove.SetDataType(pmetric.MetricDataTypeGauge) datapoint = dontmove.Gauge().DataPoints().AppendEmpty() datapoint.Attributes().UpsertString("id", "eth0") @@ -696,7 +699,7 @@ func TestMetricAdvancedGrouping(t *testing.T) { assert.Equal(t, 1, localhost.ScopeMetrics().At(0).Metrics().Len()) localhostMetric := localhost.ScopeMetrics().At(0).Metrics().At(0) assert.Equal(t, "dont-move", localhostMetric.Name()) - assert.Equal(t, pdata.MetricDataTypeGauge, localhostMetric.DataType()) + assert.Equal(t, pmetric.MetricDataTypeGauge, localhostMetric.DataType()) // We must have host-A hostA, foundHostA := retrieveHostResource(processedMetrics.ResourceMetrics(), "host-A") @@ -704,17 +707,17 @@ func TestMetricAdvancedGrouping(t *testing.T) { assert.Equal(t, 1, hostA.Resource().Attributes().Len()) assert.Equal(t, 1, hostA.ScopeMetrics().Len()) assert.Equal(t, 3, hostA.ScopeMetrics().At(0).Metrics().Len()) - hostAGauge1, foundHostAGauge1 := retrieveMetric(hostA.ScopeMetrics().At(0).Metrics(), "gauge-1", pdata.MetricDataTypeGauge) + hostAGauge1, foundHostAGauge1 := retrieveMetric(hostA.ScopeMetrics().At(0).Metrics(), "gauge-1", 
pmetric.MetricDataTypeGauge) assert.True(t, foundHostAGauge1) assert.Equal(t, 4, hostAGauge1.Gauge().DataPoints().Len()) assert.Equal(t, 1, hostAGauge1.Gauge().DataPoints().At(0).Attributes().Len()) metricIDAttribute, foundMetricIDAttribute := hostAGauge1.Gauge().DataPoints().At(0).Attributes().Get("id") assert.True(t, foundMetricIDAttribute) assert.Equal(t, "eth0", metricIDAttribute.AsString()) - hostAMixedGauge, foundHostAMixedGauge := retrieveMetric(hostA.ScopeMetrics().At(0).Metrics(), "mixed-type", pdata.MetricDataTypeGauge) + hostAMixedGauge, foundHostAMixedGauge := retrieveMetric(hostA.ScopeMetrics().At(0).Metrics(), "mixed-type", pmetric.MetricDataTypeGauge) assert.True(t, foundHostAMixedGauge) assert.Equal(t, 2, hostAMixedGauge.Gauge().DataPoints().Len()) - hostAMixedSum, foundHostAMixedSum := retrieveMetric(hostA.ScopeMetrics().At(0).Metrics(), "mixed-type", pdata.MetricDataTypeSum) + hostAMixedSum, foundHostAMixedSum := retrieveMetric(hostA.ScopeMetrics().At(0).Metrics(), "mixed-type", pmetric.MetricDataTypeSum) assert.True(t, foundHostAMixedSum) assert.Equal(t, 2, hostAMixedSum.Sum().DataPoints().Len()) @@ -724,16 +727,16 @@ func TestMetricAdvancedGrouping(t *testing.T) { assert.Equal(t, 1, hostB.Resource().Attributes().Len()) assert.Equal(t, 1, hostB.ScopeMetrics().Len()) assert.Equal(t, 2, hostB.ScopeMetrics().At(0).Metrics().Len()) - hostBGauge1, foundHostBGauge1 := retrieveMetric(hostB.ScopeMetrics().At(0).Metrics(), "gauge-1", pdata.MetricDataTypeGauge) + hostBGauge1, foundHostBGauge1 := retrieveMetric(hostB.ScopeMetrics().At(0).Metrics(), "gauge-1", pmetric.MetricDataTypeGauge) assert.True(t, foundHostBGauge1) assert.Equal(t, 2, hostBGauge1.Gauge().DataPoints().Len()) - hostBMixedGauge, foundHostBMixedGauge := retrieveMetric(hostB.ScopeMetrics().At(0).Metrics(), "mixed-type", pdata.MetricDataTypeGauge) + hostBMixedGauge, foundHostBMixedGauge := retrieveMetric(hostB.ScopeMetrics().At(0).Metrics(), "mixed-type", pmetric.MetricDataTypeGauge) assert.True(t, foundHostBMixedGauge) assert.Equal(t, 1, hostBMixedGauge.Gauge().DataPoints().Len()) } // Test helper function that retrieves the resource with the specified "host.name" attribute -func retrieveHostResource(resources pdata.ResourceMetricsSlice, hostname string) (pdata.ResourceMetrics, bool) { +func retrieveHostResource(resources pmetric.ResourceMetricsSlice, hostname string) (pmetric.ResourceMetrics, bool) { for i := 0; i < resources.Len(); i++ { resource := resources.At(i) hostnameValue, foundKey := resource.Resource().Attributes().Get("host.name") @@ -741,18 +744,18 @@ func retrieveHostResource(resources pdata.ResourceMetricsSlice, hostname string) return resource, true } } - return pdata.ResourceMetrics{}, false + return pmetric.ResourceMetrics{}, false } // Test helper function that retrieves the specified metric -func retrieveMetric(metrics pdata.MetricSlice, name string, metricType pdata.MetricDataType) (pdata.Metric, bool) { +func retrieveMetric(metrics pmetric.MetricSlice, name string, metricType pmetric.MetricDataType) (pmetric.Metric, bool) { for i := 0; i < metrics.Len(); i++ { metric := metrics.At(i) if metric.Name() == name && metric.DataType() == metricType { return metric, true } } - return pdata.Metric{}, false + return pmetric.Metric{}, false } func TestCompacting(t *testing.T) { diff --git a/processor/groupbytraceprocessor/event.go b/processor/groupbytraceprocessor/event.go index 3afe8951b826..b9b7a9cdd5a6 100644 --- a/processor/groupbytraceprocessor/event.go +++ b/processor/groupbytraceprocessor/event.go 
@@ -24,7 +24,8 @@ import (
 	"go.opencensus.io/stats"
 	"go.opencensus.io/tag"
-	"go.opentelemetry.io/collector/model/pdata"
+	"go.opentelemetry.io/collector/pdata/pcommon"
+	"go.opentelemetry.io/collector/pdata/ptrace"
 	"go.uber.org/zap"
 )

@@ -63,8 +64,8 @@ type event struct {
 }

 type tracesWithID struct {
-	id pdata.TraceID
-	td pdata.Traces
+	id pcommon.TraceID
+	td ptrace.Traces
 }

 // eventMachine is a machine that accepts events in a typically non-blocking manner,
@@ -81,9 +82,9 @@ type eventMachine struct {
 	logger *zap.Logger

 	onTraceReceived func(td tracesWithID, worker *eventMachineWorker) error
-	onTraceExpired func(traceID pdata.TraceID, worker *eventMachineWorker) error
-	onTraceReleased func(rss []pdata.ResourceSpans) error
-	onTraceRemoved func(traceID pdata.TraceID) error
+	onTraceExpired func(traceID pcommon.TraceID, worker *eventMachineWorker) error
+	onTraceReleased func(rss []ptrace.ResourceSpans) error
+	onTraceRemoved func(traceID pcommon.TraceID) error

 	onError func(event)

@@ -171,7 +172,7 @@ func (em *eventMachine) handleEvent(e event, w *eventMachineWorker) {
 			em.callOnError(e)
 			return
 		}
-		payload, ok := e.payload.(pdata.TraceID)
+		payload, ok := e.payload.(pcommon.TraceID)
 		if !ok {
 			// the payload had an unexpected type!
 			em.callOnError(e)
@@ -187,7 +188,7 @@ func (em *eventMachine) handleEvent(e event, w *eventMachineWorker) {
 			em.callOnError(e)
 			return
 		}
-		payload, ok := e.payload.([]pdata.ResourceSpans)
+		payload, ok := e.payload.([]ptrace.ResourceSpans)
 		if !ok {
 			// the payload had an unexpected type!
 			em.callOnError(e)
@@ -203,7 +204,7 @@ func (em *eventMachine) handleEvent(e event, w *eventMachineWorker) {
 			em.callOnError(e)
 			return
 		}
-		payload, ok := e.payload.(pdata.TraceID)
+		payload, ok := e.payload.(pcommon.TraceID)
 		if !ok {
 			// the payload had an unexpected type!
 			em.callOnError(e)
@@ -221,7 +222,7 @@ func (em *eventMachine) handleEvent(e event, w *eventMachineWorker) {
 }

 // consume takes a single trace and routes it to one of the workers.
-func (em *eventMachine) consume(td pdata.Traces) error { +func (em *eventMachine) consume(td ptrace.Traces) error { traceID, err := getTraceID(td) if err != nil { return fmt.Errorf("eventmachine consume failed: %w", err) @@ -241,7 +242,7 @@ func (em *eventMachine) consume(td pdata.Traces) error { return nil } -func workerIndexForTraceID(traceID pdata.TraceID, numWorkers int) uint64 { +func workerIndexForTraceID(traceID pcommon.TraceID, numWorkers int) uint64 { hash := hashPool.Get().(*maphash.Hash) defer func() { hash.Reset() @@ -361,20 +362,20 @@ func doWithTimeout(timeout time.Duration, do func() error) (bool, error) { } } -func getTraceID(td pdata.Traces) (pdata.TraceID, error) { +func getTraceID(td ptrace.Traces) (pcommon.TraceID, error) { rss := td.ResourceSpans() if rss.Len() == 0 { - return pdata.InvalidTraceID(), errNoTraceID + return pcommon.InvalidTraceID(), errNoTraceID } ilss := rss.At(0).ScopeSpans() if ilss.Len() == 0 { - return pdata.InvalidTraceID(), errNoTraceID + return pcommon.InvalidTraceID(), errNoTraceID } spans := ilss.At(0).Spans() if spans.Len() == 0 { - return pdata.InvalidTraceID(), errNoTraceID + return pcommon.InvalidTraceID(), errNoTraceID } return spans.At(0).TraceID(), nil diff --git a/processor/groupbytraceprocessor/event_test.go b/processor/groupbytraceprocessor/event_test.go index a1d16533c323..fe1d04fe4ead 100644 --- a/processor/groupbytraceprocessor/event_test.go +++ b/processor/groupbytraceprocessor/event_test.go @@ -26,7 +26,8 @@ import ( "github.com/stretchr/testify/require" "go.opencensus.io/stats" "go.opencensus.io/stats/view" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap" ) @@ -40,7 +41,7 @@ func TestEventCallback(t *testing.T) { { casename: "onTraceReceived", typ: traceReceived, - payload: tracesWithID{id: pdata.InvalidTraceID(), td: pdata.NewTraces()}, + payload: tracesWithID{id: pcommon.InvalidTraceID(), td: ptrace.NewTraces()}, registerCallback: func(em *eventMachine, wg *sync.WaitGroup) { em.onTraceReceived = func(received tracesWithID, worker *eventMachineWorker) error { wg.Done() @@ -51,11 +52,11 @@ func TestEventCallback(t *testing.T) { { casename: "onTraceExpired", typ: traceExpired, - payload: pdata.NewTraceID([16]byte{1, 2, 3, 4}), + payload: pcommon.NewTraceID([16]byte{1, 2, 3, 4}), registerCallback: func(em *eventMachine, wg *sync.WaitGroup) { - em.onTraceExpired = func(expired pdata.TraceID, worker *eventMachineWorker) error { + em.onTraceExpired = func(expired pcommon.TraceID, worker *eventMachineWorker) error { wg.Done() - assert.Equal(t, pdata.NewTraceID([16]byte{1, 2, 3, 4}), expired) + assert.Equal(t, pcommon.NewTraceID([16]byte{1, 2, 3, 4}), expired) return nil } }, @@ -63,9 +64,9 @@ func TestEventCallback(t *testing.T) { { casename: "onTraceReleased", typ: traceReleased, - payload: []pdata.ResourceSpans{}, + payload: []ptrace.ResourceSpans{}, registerCallback: func(em *eventMachine, wg *sync.WaitGroup) { - em.onTraceReleased = func(expired []pdata.ResourceSpans) error { + em.onTraceReleased = func(expired []ptrace.ResourceSpans) error { wg.Done() return nil } @@ -74,11 +75,11 @@ func TestEventCallback(t *testing.T) { { casename: "onTraceRemoved", typ: traceRemoved, - payload: pdata.NewTraceID([16]byte{1, 2, 3, 4}), + payload: pcommon.NewTraceID([16]byte{1, 2, 3, 4}), registerCallback: func(em *eventMachine, wg *sync.WaitGroup) { - em.onTraceRemoved = func(expired pdata.TraceID) error { + em.onTraceRemoved = 
func(expired pcommon.TraceID) error { wg.Done() - assert.Equal(t, pdata.NewTraceID([16]byte{1, 2, 3, 4}), expired) + assert.Equal(t, pcommon.NewTraceID([16]byte{1, 2, 3, 4}), expired) return nil } }, @@ -175,7 +176,7 @@ func TestEventInvalidPayload(t *testing.T) { casename: "onTraceExpired", typ: traceExpired, registerCallback: func(em *eventMachine, wg *sync.WaitGroup) { - em.onTraceExpired = func(expired pdata.TraceID, worker *eventMachineWorker) error { + em.onTraceExpired = func(expired pcommon.TraceID, worker *eventMachineWorker) error { return nil } }, @@ -184,7 +185,7 @@ func TestEventInvalidPayload(t *testing.T) { casename: "onTraceReleased", typ: traceReleased, registerCallback: func(em *eventMachine, wg *sync.WaitGroup) { - em.onTraceReleased = func(released []pdata.ResourceSpans) error { + em.onTraceReleased = func(released []ptrace.ResourceSpans) error { return nil } }, @@ -193,7 +194,7 @@ func TestEventInvalidPayload(t *testing.T) { casename: "onTraceRemoved", typ: traceRemoved, registerCallback: func(em *eventMachine, wg *sync.WaitGroup) { - em.onTraceRemoved = func(expired pdata.TraceID) error { + em.onTraceRemoved = func(expired pcommon.TraceID) error { return nil } }, @@ -283,11 +284,11 @@ func TestEventTracePerWorker(t *testing.T) { workerForTrace = w w.fire(event{ typ: traceExpired, - payload: pdata.NewTraceID([16]byte{1}), + payload: pcommon.NewTraceID([16]byte{1}), }) return nil } - em.onTraceExpired = func(id pdata.TraceID, w *eventMachineWorker) error { + em.onTraceExpired = func(id pcommon.TraceID, w *eventMachineWorker) error { assert.Equal(t, workerForTrace, w) wg.Done() return nil @@ -295,11 +296,11 @@ func TestEventTracePerWorker(t *testing.T) { em.startInBackground() defer em.shutdown() - td := pdata.NewTraces() + td := ptrace.NewTraces() ils := td.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty() if tt.traceID != [16]byte{} { span := ils.Spans().AppendEmpty() - span.SetTraceID(pdata.NewTraceID(tt.traceID)) + span.SetTraceID(pcommon.NewTraceID(tt.traceID)) } // test @@ -335,14 +336,14 @@ func TestEventConsumeConsistency(t *testing.T) { }, } { t.Run(tt.casename, func(t *testing.T) { - realTraceID := workerIndexForTraceID(pdata.NewTraceID(tt.traceID), 100) + realTraceID := workerIndexForTraceID(pcommon.NewTraceID(tt.traceID), 100) var wg sync.WaitGroup for i := 0; i < 50; i++ { wg.Add(1) go func() { defer wg.Done() for j := 0; j < 30; j++ { - assert.Equal(t, realTraceID, workerIndexForTraceID(pdata.NewTraceID(tt.traceID), 100)) + assert.Equal(t, realTraceID, workerIndexForTraceID(pcommon.NewTraceID(tt.traceID), 100)) } }() } @@ -362,11 +363,11 @@ func TestEventShutdown(t *testing.T) { atomic.StoreInt64(&traceReceivedFired, 1) return nil } - em.onTraceExpired = func(pdata.TraceID, *eventMachineWorker) error { + em.onTraceExpired = func(pcommon.TraceID, *eventMachineWorker) error { atomic.StoreInt64(&traceExpiredFired, 1) return nil } - em.onTraceRemoved = func(pdata.TraceID) error { + em.onTraceRemoved = func(pcommon.TraceID) error { wg.Wait() return nil } @@ -375,15 +376,15 @@ func TestEventShutdown(t *testing.T) { // test em.workers[0].fire(event{ typ: traceReceived, - payload: tracesWithID{id: pdata.InvalidTraceID(), td: pdata.NewTraces()}, + payload: tracesWithID{id: pcommon.InvalidTraceID(), td: ptrace.NewTraces()}, }) em.workers[0].fire(event{ typ: traceRemoved, - payload: pdata.NewTraceID([16]byte{1, 2, 3, 4}), + payload: pcommon.NewTraceID([16]byte{1, 2, 3, 4}), }) em.workers[0].fire(event{ typ: traceRemoved, - payload: pdata.NewTraceID([16]byte{1, 
2, 3, 4}), + payload: pcommon.NewTraceID([16]byte{1, 2, 3, 4}), }) time.Sleep(10 * time.Millisecond) // give it a bit of time to process the items @@ -404,7 +405,7 @@ func TestEventShutdown(t *testing.T) { // new events should *not* be processed em.workers[0].fire(event{ typ: traceExpired, - payload: pdata.NewTraceID([16]byte{1, 2, 3, 4}), + payload: pcommon.NewTraceID([16]byte{1, 2, 3, 4}), }) // verify diff --git a/processor/groupbytraceprocessor/go.mod b/processor/groupbytraceprocessor/go.mod index aaa1698469c1..1a7d7ae343dc 100644 --- a/processor/groupbytraceprocessor/go.mod +++ b/processor/groupbytraceprocessor/go.mod @@ -6,8 +6,8 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.48.0 github.com/stretchr/testify v1.7.1 go.opencensus.io v0.23.0 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/multierr v1.8.0 go.uber.org/zap v1.21.0 ) @@ -16,7 +16,7 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/fsnotify/fsnotify v1.5.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/pretty v0.3.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect @@ -24,7 +24,6 @@ require ( github.com/pelletier/go-toml v1.9.4 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect @@ -34,3 +33,5 @@ require ( ) replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal => ../../pkg/batchpersignal + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/processor/groupbytraceprocessor/go.sum b/processor/groupbytraceprocessor/go.sum index d714d365e27b..9748382089c6 100644 --- a/processor/groupbytraceprocessor/go.sum +++ b/processor/groupbytraceprocessor/go.sum @@ -94,8 +94,8 @@ github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= @@ -140,8 +140,6 @@ github.com/rogpeppe/go-internal v1.6.1 
h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBO github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -156,15 +154,15 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -220,7 +218,7 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= 
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
diff --git a/processor/groupbytraceprocessor/processor.go b/processor/groupbytraceprocessor/processor.go
index 57276d56e3d7..686d263cad27 100644
--- a/processor/groupbytraceprocessor/processor.go
+++ b/processor/groupbytraceprocessor/processor.go
@@ -22,7 +22,8 @@ import (
 	"go.opencensus.io/stats"
 	"go.opentelemetry.io/collector/component"
 	"go.opentelemetry.io/collector/consumer"
-	"go.opentelemetry.io/collector/model/pdata"
+	"go.opentelemetry.io/collector/pdata/pcommon"
+	"go.opentelemetry.io/collector/pdata/ptrace"
 	"go.uber.org/multierr"
 	"go.uber.org/zap"

@@ -79,7 +80,7 @@ func newGroupByTraceProcessor(logger *zap.Logger, st storage, nextConsumer consu
 	return sp
 }

-func (sp *groupByTraceProcessor) ConsumeTraces(_ context.Context, td pdata.Traces) error {
+func (sp *groupByTraceProcessor) ConsumeTraces(_ context.Context, td ptrace.Traces) error {
 	var errs error
 	for _, singleTrace := range batchpersignal.SplitTraces(td) {
 		errs = multierr.Append(errs, sp.eventMachine.consume(singleTrace))
@@ -157,7 +158,7 @@ func (sp *groupByTraceProcessor) onTraceReceived(trace tracesWithID, worker *eve
 	return nil
 }

-func (sp *groupByTraceProcessor) onTraceExpired(traceID pdata.TraceID, worker *eventMachineWorker) error {
+func (sp *groupByTraceProcessor) onTraceExpired(traceID pcommon.TraceID, worker *eventMachineWorker) error {
 	sp.logger.Debug("processing expired", zap.String("traceID", traceID.HexString()))

@@ -182,7 +183,7 @@ func (sp *groupByTraceProcessor) onTraceExpired(traceID pdata.TraceID, worker *e
 	return nil
 }

-func (sp *groupByTraceProcessor) markAsReleased(traceID pdata.TraceID, fire func(...event)) error {
+func (sp *groupByTraceProcessor) markAsReleased(traceID pcommon.TraceID, fire func(...event)) error {
 	// #get is a potentially blocking operation
 	trace, err := sp.st.get(traceID)
 	if err != nil {
@@ -208,8 +209,8 @@ func (sp *groupByTraceProcessor) markAsReleased(traceID pdata.TraceID, fire func
 	return nil
 }

-func (sp *groupByTraceProcessor) onTraceReleased(rss []pdata.ResourceSpans) error {
-	trace := pdata.NewTraces()
+func (sp *groupByTraceProcessor) onTraceReleased(rss []ptrace.ResourceSpans) error {
+	trace := ptrace.NewTraces()
 	for _, rs := range rss {
 		trs := trace.ResourceSpans().AppendEmpty()
 		rs.CopyTo(trs)
@@ -228,7 +229,7 @@ func (sp *groupByTraceProcessor) onTraceReleased(rss []pdata.ResourceSpans) erro
 	return nil
 }

-func (sp *groupByTraceProcessor) onTraceRemoved(traceID pdata.TraceID) error {
+func (sp *groupByTraceProcessor) onTraceRemoved(traceID pcommon.TraceID) error {
 	trace, err := sp.st.delete(traceID)
 	if err != nil {
 		return fmt.Errorf("couldn't delete trace %q from the storage: %w", traceID.HexString(), err)
@@ -241,7 +242,7 @@ func (sp *groupByTraceProcessor) onTraceRemoved(traceID pdata.TraceID) error {
 	return nil
 }

-func (sp *groupByTraceProcessor) addSpans(traceID pdata.TraceID, trace pdata.Traces) error {
+func (sp *groupByTraceProcessor) addSpans(traceID pcommon.TraceID, trace ptrace.Traces) error {
 	sp.logger.Debug("creating trace at the storage", zap.String("traceID", traceID.HexString()))
 	return sp.st.createOrAppend(traceID, trace)
 }
diff --git a/processor/groupbytraceprocessor/processor_test.go b/processor/groupbytraceprocessor/processor_test.go
index 0f25abec8ef8..a3a8a332d21a 100644
--- a/processor/groupbytraceprocessor/processor_test.go
+++
b/processor/groupbytraceprocessor/processor_test.go @@ -25,7 +25,8 @@ import ( "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal" @@ -42,7 +43,7 @@ func TestTraceIsDispatchedAfterDuration(t *testing.T) { NumWorkers: 4, } mockProcessor := &mockProcessor{ - onTraces: func(ctx context.Context, received pdata.Traces) error { + onTraces: func(ctx context.Context, received ptrace.Traces) error { assert.Equal(t, traces, received) wgReceived.Done() return nil @@ -54,7 +55,7 @@ func TestTraceIsDispatchedAfterDuration(t *testing.T) { st := &mockStorage{ onCreateOrAppend: backing.createOrAppend, onGet: backing.get, - onDelete: func(traceID pdata.TraceID) ([]pdata.ResourceSpans, error) { + onDelete: func(traceID pcommon.TraceID) ([]ptrace.ResourceSpans, error) { wgDeleted.Done() return backing.delete(traceID) }, @@ -92,9 +93,9 @@ func TestInternalCacheLimit(t *testing.T) { wg.Add(5) // 5 traces are expected to be received - var receivedTraceIDs []pdata.TraceID + var receivedTraceIDs []pcommon.TraceID mockProcessor := &mockProcessor{} - mockProcessor.onTraces = func(ctx context.Context, received pdata.Traces) error { + mockProcessor.onTraces = func(ctx context.Context, received ptrace.Traces) error { traceID := received.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).TraceID() receivedTraceIDs = append(receivedTraceIDs, traceID) wg.Done() @@ -121,7 +122,7 @@ func TestInternalCacheLimit(t *testing.T) { // 6 iterations for _, traceID := range traceIDs { - batch := simpleTracesWithID(pdata.NewTraceID(traceID)) + batch := simpleTracesWithID(pcommon.NewTraceID(traceID)) assert.NoError(t, p.ConsumeTraces(ctx, batch)) } @@ -131,7 +132,7 @@ func TestInternalCacheLimit(t *testing.T) { assert.Equal(t, 5, len(receivedTraceIDs)) for i := 5; i > 0; i-- { // last 5 traces - traceID := pdata.NewTraceID(traceIDs[i]) + traceID := pcommon.NewTraceID(traceIDs[i]) assert.Contains(t, receivedTraceIDs, traceID) } @@ -168,14 +169,14 @@ func TestProcessBatchDoesntFail(t *testing.T) { st := newMemoryStorage() next := &mockProcessor{} - traceID := pdata.NewTraceID([16]byte{1, 2, 3, 4}) + traceID := pcommon.NewTraceID([16]byte{1, 2, 3, 4}) - trace := pdata.NewTraces() + trace := ptrace.NewTraces() rs := trace.ResourceSpans().AppendEmpty() ils := rs.ScopeSpans().AppendEmpty() span := ils.Spans().AppendEmpty() span.SetTraceID(traceID) - span.SetSpanID(pdata.NewSpanID([8]byte{1, 2, 3, 4})) + span.SetSpanID(pcommon.NewSpanID([8]byte{1, 2, 3, 4})) p := newGroupByTraceProcessor(zap.NewNop(), st, next, config) assert.NotNil(t, p) @@ -192,7 +193,7 @@ func TestTraceDisappearedFromStorageBeforeReleasing(t *testing.T) { NumWorkers: 4, } st := &mockStorage{ - onGet: func(pdata.TraceID) ([]pdata.ResourceSpans, error) { + onGet: func(pcommon.TraceID) ([]ptrace.ResourceSpans, error) { return nil, nil }, } @@ -201,7 +202,7 @@ func TestTraceDisappearedFromStorageBeforeReleasing(t *testing.T) { p := newGroupByTraceProcessor(zap.NewNop(), st, next, config) require.NotNil(t, p) - traceID := pdata.NewTraceID([16]byte{1, 2, 3, 4}) + traceID := pcommon.NewTraceID([16]byte{1, 2, 3, 4}) batch := simpleTracesWithID(traceID) ctx := context.Background() @@ -228,7 +229,7 @@ func TestTraceErrorFromStorageWhileReleasing(t *testing.T) 
{ } expectedError := errors.New("some unexpected error") st := &mockStorage{ - onGet: func(pdata.TraceID) ([]pdata.ResourceSpans, error) { + onGet: func(pcommon.TraceID) ([]ptrace.ResourceSpans, error) { return nil, expectedError }, } @@ -237,7 +238,7 @@ func TestTraceErrorFromStorageWhileReleasing(t *testing.T) { p := newGroupByTraceProcessor(zap.NewNop(), st, next, config) require.NotNil(t, p) - traceID := pdata.NewTraceID([16]byte{1, 2, 3, 4}) + traceID := pcommon.NewTraceID([16]byte{1, 2, 3, 4}) batch := simpleTracesWithID(traceID) ctx := context.Background() @@ -264,7 +265,7 @@ func TestTraceErrorFromStorageWhileProcessingTrace(t *testing.T) { } expectedError := errors.New("some unexpected error") st := &mockStorage{ - onCreateOrAppend: func(pdata.TraceID, pdata.Traces) error { + onCreateOrAppend: func(pcommon.TraceID, ptrace.Traces) error { return expectedError }, } @@ -273,15 +274,15 @@ func TestTraceErrorFromStorageWhileProcessingTrace(t *testing.T) { p := newGroupByTraceProcessor(zap.NewNop(), st, next, config) require.NotNil(t, p) - traceID := pdata.NewTraceID([16]byte{1, 2, 3, 4}) + traceID := pcommon.NewTraceID([16]byte{1, 2, 3, 4}) - trace := pdata.NewTraces() + trace := ptrace.NewTraces() rss := trace.ResourceSpans() rs := rss.AppendEmpty() ils := rs.ScopeSpans().AppendEmpty() span := ils.Spans().AppendEmpty() span.SetTraceID(traceID) - span.SetSpanID(pdata.NewSpanID([8]byte{1, 2, 3, 4})) + span.SetSpanID(pcommon.NewSpanID([8]byte{1, 2, 3, 4})) batch := batchpersignal.SplitTraces(trace) @@ -302,9 +303,9 @@ func TestAddSpansToExistingTrace(t *testing.T) { } st := newMemoryStorage() - var receivedTraces []pdata.ResourceSpans + var receivedTraces []ptrace.ResourceSpans next := &mockProcessor{ - onTraces: func(ctx context.Context, traces pdata.Traces) error { + onTraces: func(ctx context.Context, traces ptrace.Traces) error { require.Equal(t, 2, traces.ResourceSpans().Len()) receivedTraces = append(receivedTraces, traces.ResourceSpans().At(0)) receivedTraces = append(receivedTraces, traces.ResourceSpans().At(1)) @@ -320,7 +321,7 @@ func TestAddSpansToExistingTrace(t *testing.T) { assert.NoError(t, p.Start(ctx, nil)) defer p.Shutdown(ctx) - traceID := pdata.NewTraceID([16]byte{1, 2, 3, 4}) + traceID := pcommon.NewTraceID([16]byte{1, 2, 3, 4}) // test first := simpleTracesWithID(traceID) @@ -353,15 +354,15 @@ func TestTraceErrorFromStorageWhileProcessingSecondTrace(t *testing.T) { p := newGroupByTraceProcessor(zap.NewNop(), st, next, config) require.NotNil(t, p) - traceID := pdata.NewTraceID([16]byte{1, 2, 3, 4}) + traceID := pcommon.NewTraceID([16]byte{1, 2, 3, 4}) - trace := pdata.NewTraces() + trace := ptrace.NewTraces() rss := trace.ResourceSpans() rs := rss.AppendEmpty() ils := rs.ScopeSpans().AppendEmpty() span := ils.Spans().AppendEmpty() span.SetTraceID(traceID) - span.SetSpanID(pdata.NewSpanID([8]byte{1, 2, 3, 4})) + span.SetSpanID(pcommon.NewSpanID([8]byte{1, 2, 3, 4})) batch := batchpersignal.SplitTraces(trace) @@ -370,7 +371,7 @@ func TestTraceErrorFromStorageWhileProcessingSecondTrace(t *testing.T) { assert.NoError(t, err) expectedError := errors.New("some unexpected error") - st.onCreateOrAppend = func(pdata.TraceID, pdata.Traces) error { + st.onCreateOrAppend = func(pcommon.TraceID, ptrace.Traces) error { return expectedError } @@ -392,7 +393,7 @@ func TestErrorFromStorageWhileRemovingTrace(t *testing.T) { } expectedError := errors.New("some unexpected error") st := &mockStorage{ - onDelete: func(pdata.TraceID) ([]pdata.ResourceSpans, error) { + onDelete: 
func(pcommon.TraceID) ([]ptrace.ResourceSpans, error) { return nil, expectedError }, } @@ -401,7 +402,7 @@ func TestErrorFromStorageWhileRemovingTrace(t *testing.T) { p := newGroupByTraceProcessor(zap.NewNop(), st, next, config) require.NotNil(t, p) - traceID := pdata.NewTraceID([16]byte{1, 2, 3, 4}) + traceID := pcommon.NewTraceID([16]byte{1, 2, 3, 4}) // test err := p.onTraceRemoved(traceID) @@ -418,7 +419,7 @@ func TestTraceNotFoundWhileRemovingTrace(t *testing.T) { NumWorkers: 4, } st := &mockStorage{ - onDelete: func(pdata.TraceID) ([]pdata.ResourceSpans, error) { + onDelete: func(pcommon.TraceID) ([]ptrace.ResourceSpans, error) { return nil, nil }, } @@ -427,7 +428,7 @@ func TestTraceNotFoundWhileRemovingTrace(t *testing.T) { p := newGroupByTraceProcessor(zap.NewNop(), st, next, config) require.NotNil(t, p) - traceID := pdata.NewTraceID([16]byte{1, 2, 3, 4}) + traceID := pcommon.NewTraceID([16]byte{1, 2, 3, 4}) // test err := p.onTraceRemoved(traceID) @@ -447,7 +448,7 @@ func TestTracesAreDispatchedInIndividualBatches(t *testing.T) { } st := newMemoryStorage() next := &mockProcessor{ - onTraces: func(_ context.Context, traces pdata.Traces) error { + onTraces: func(_ context.Context, traces ptrace.Traces) error { // we should receive two batches, each one with one trace assert.Equal(t, 1, traces.ResourceSpans().Len()) wg.Done() @@ -462,17 +463,17 @@ func TestTracesAreDispatchedInIndividualBatches(t *testing.T) { assert.NoError(t, p.Start(ctx, nil)) defer p.Shutdown(ctx) - traceID := pdata.NewTraceID([16]byte{1, 2, 3, 4}) + traceID := pcommon.NewTraceID([16]byte{1, 2, 3, 4}) - firstTrace := pdata.NewTraces() + firstTrace := ptrace.NewTraces() firstRss := firstTrace.ResourceSpans() firstResourceSpans := firstRss.AppendEmpty() ils := firstResourceSpans.ScopeSpans().AppendEmpty() span := ils.Spans().AppendEmpty() span.SetTraceID(traceID) - secondTraceID := pdata.NewTraceID([16]byte{2, 3, 4, 5}) - secondTrace := pdata.NewTraces() + secondTraceID := pcommon.NewTraceID([16]byte{2, 3, 4, 5}) + secondTrace := ptrace.NewTraces() secondRss := secondTrace.ResourceSpans() secondResourceSpans := secondRss.AppendEmpty() secondIls := secondResourceSpans.ScopeSpans().AppendEmpty() @@ -504,19 +505,19 @@ func TestErrorOnProcessResourceSpansContinuesProcessing(t *testing.T) { p := newGroupByTraceProcessor(zap.NewNop(), st, next, config) require.NotNil(t, p) - traceID := pdata.NewTraceID([16]byte{1, 2, 3, 4}) + traceID := pcommon.NewTraceID([16]byte{1, 2, 3, 4}) - trace := pdata.NewTraces() + trace := ptrace.NewTraces() rss := trace.ResourceSpans() rs := rss.AppendEmpty() ils := rs.ScopeSpans().AppendEmpty() span := ils.Spans().AppendEmpty() span.SetTraceID(traceID) - span.SetSpanID(pdata.NewSpanID([8]byte{1, 2, 3, 4})) + span.SetSpanID(pcommon.NewSpanID([8]byte{1, 2, 3, 4})) expectedError := errors.New("some unexpected error") returnedError := false - st.onCreateOrAppend = func(pdata.TraceID, pdata.Traces) error { + st.onCreateOrAppend = func(pcommon.TraceID, ptrace.Traces) error { returnedError = true return expectedError } @@ -553,7 +554,7 @@ func BenchmarkConsumeTracesCompleteOnFirstBatch(b *testing.B) { // For each input trace there are always <= 2 events in the machine simultaneously. 
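A minimal sketch of the signature these test doubles converge on: callbacks that previously took pdata.Traces now take ptrace.Traces, with no behavioral change. The mockConsumer name below is illustrative and not part of this change.

package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/collector/pdata/ptrace"
)

// mockConsumer mirrors the shape of the mockProcessor in these tests:
// only the payload type moves, from pdata.Traces to ptrace.Traces.
type mockConsumer struct {
	onTraces func(context.Context, ptrace.Traces) error
}

func (m *mockConsumer) ConsumeTraces(ctx context.Context, td ptrace.Traces) error {
	if m.onTraces != nil {
		return m.onTraces(ctx, td)
	}
	return nil
}

func main() {
	c := &mockConsumer{onTraces: func(_ context.Context, td ptrace.Traces) error {
		fmt.Println("resource spans:", td.ResourceSpans().Len())
		return nil
	}}
	_ = c.ConsumeTraces(context.Background(), ptrace.NewTraces())
}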
semaphoreCh := make(chan struct{}, bufferSize/2) - next := &mockProcessor{onTraces: func(context.Context, pdata.Traces) error { + next := &mockProcessor{onTraces: func(context.Context, ptrace.Traces) error { <-semaphoreCh return nil }} @@ -566,7 +567,7 @@ func BenchmarkConsumeTracesCompleteOnFirstBatch(b *testing.B) { defer p.Shutdown(ctx) for n := 0; n < b.N; n++ { - traceID := pdata.NewTraceID([16]byte{byte(1 + n), 2, 3, 4}) + traceID := pcommon.NewTraceID([16]byte{byte(1 + n), 2, 3, 4}) trace := simpleTracesWithID(traceID) p.ConsumeTraces(context.Background(), trace) } @@ -574,12 +575,12 @@ func BenchmarkConsumeTracesCompleteOnFirstBatch(b *testing.B) { type mockProcessor struct { mutex sync.Mutex - onTraces func(context.Context, pdata.Traces) error + onTraces func(context.Context, ptrace.Traces) error } var _ component.TracesProcessor = (*mockProcessor)(nil) -func (m *mockProcessor) ConsumeTraces(ctx context.Context, td pdata.Traces) error { +func (m *mockProcessor) ConsumeTraces(ctx context.Context, td ptrace.Traces) error { if m.onTraces != nil { m.mutex.Lock() defer m.mutex.Unlock() @@ -598,28 +599,28 @@ func (m *mockProcessor) Start(_ context.Context, _ component.Host) error { } type mockStorage struct { - onCreateOrAppend func(pdata.TraceID, pdata.Traces) error - onGet func(pdata.TraceID) ([]pdata.ResourceSpans, error) - onDelete func(pdata.TraceID) ([]pdata.ResourceSpans, error) + onCreateOrAppend func(pcommon.TraceID, ptrace.Traces) error + onGet func(pcommon.TraceID) ([]ptrace.ResourceSpans, error) + onDelete func(pcommon.TraceID) ([]ptrace.ResourceSpans, error) onStart func() error onShutdown func() error } var _ storage = (*mockStorage)(nil) -func (st *mockStorage) createOrAppend(traceID pdata.TraceID, trace pdata.Traces) error { +func (st *mockStorage) createOrAppend(traceID pcommon.TraceID, trace ptrace.Traces) error { if st.onCreateOrAppend != nil { return st.onCreateOrAppend(traceID, trace) } return nil } -func (st *mockStorage) get(traceID pdata.TraceID) ([]pdata.ResourceSpans, error) { +func (st *mockStorage) get(traceID pcommon.TraceID) ([]ptrace.ResourceSpans, error) { if st.onGet != nil { return st.onGet(traceID) } return nil, nil } -func (st *mockStorage) delete(traceID pdata.TraceID) ([]pdata.ResourceSpans, error) { +func (st *mockStorage) delete(traceID pcommon.TraceID) ([]ptrace.ResourceSpans, error) { if st.onDelete != nil { return st.onDelete(traceID) } @@ -647,17 +648,17 @@ var _ consumer.Traces = (*blockingConsumer)(nil) func (b *blockingConsumer) Capabilities() consumer.Capabilities { return consumer.Capabilities{MutatesData: false} } -func (b *blockingConsumer) ConsumeTraces(context.Context, pdata.Traces) error { +func (b *blockingConsumer) ConsumeTraces(context.Context, ptrace.Traces) error { <-b.blockCh return nil } -func simpleTraces() pdata.Traces { - return simpleTracesWithID(pdata.NewTraceID([16]byte{1, 2, 3, 4})) +func simpleTraces() ptrace.Traces { + return simpleTracesWithID(pcommon.NewTraceID([16]byte{1, 2, 3, 4})) } -func simpleTracesWithID(traceID pdata.TraceID) pdata.Traces { - traces := pdata.NewTraces() +func simpleTracesWithID(traceID pcommon.TraceID) ptrace.Traces { + traces := ptrace.NewTraces() rs := traces.ResourceSpans().AppendEmpty() ils := rs.ScopeSpans().AppendEmpty() ils.Spans().AppendEmpty().SetTraceID(traceID) diff --git a/processor/groupbytraceprocessor/ring_buffer.go b/processor/groupbytraceprocessor/ring_buffer.go index 24c92c5f9694..28c63f11e9a0 100644 --- a/processor/groupbytraceprocessor/ring_buffer.go +++ 
b/processor/groupbytraceprocessor/ring_buffer.go @@ -14,26 +14,26 @@ package groupbytraceprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbytraceprocessor" -import "go.opentelemetry.io/collector/model/pdata" +import "go.opentelemetry.io/collector/pdata/pcommon" // ringBuffer keeps an in-memory bounded buffer with the in-flight trace IDs type ringBuffer struct { index int size int - ids []pdata.TraceID - idToIndex map[pdata.TraceID]int // key is traceID, value is the index on the 'ids' slice + ids []pcommon.TraceID + idToIndex map[pcommon.TraceID]int // key is traceID, value is the index on the 'ids' slice } func newRingBuffer(size int) *ringBuffer { return &ringBuffer{ index: -1, // the first span to be received will be placed at position '0' size: size, - ids: make([]pdata.TraceID, size), - idToIndex: make(map[pdata.TraceID]int), + ids: make([]pcommon.TraceID, size), + idToIndex: make(map[pcommon.TraceID]int), } } -func (r *ringBuffer) put(traceID pdata.TraceID) pdata.TraceID { +func (r *ringBuffer) put(traceID pcommon.TraceID) pcommon.TraceID { // calculates the item in the ring that we'll store the trace r.index = (r.index + 1) % r.size @@ -52,18 +52,18 @@ func (r *ringBuffer) put(traceID pdata.TraceID) pdata.TraceID { return evicted } -func (r *ringBuffer) contains(traceID pdata.TraceID) bool { +func (r *ringBuffer) contains(traceID pcommon.TraceID) bool { _, found := r.idToIndex[traceID] return found } -func (r *ringBuffer) delete(traceID pdata.TraceID) bool { +func (r *ringBuffer) delete(traceID pcommon.TraceID) bool { index, found := r.idToIndex[traceID] if !found { return false } delete(r.idToIndex, traceID) - r.ids[index] = pdata.InvalidTraceID() + r.ids[index] = pcommon.InvalidTraceID() return true } diff --git a/processor/groupbytraceprocessor/ring_buffer_test.go b/processor/groupbytraceprocessor/ring_buffer_test.go index 77977937e9cb..40ac28e01573 100644 --- a/processor/groupbytraceprocessor/ring_buffer_test.go +++ b/processor/groupbytraceprocessor/ring_buffer_test.go @@ -18,7 +18,7 @@ import ( "testing" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" ) func TestRingBufferCapacity(t *testing.T) { @@ -26,13 +26,13 @@ func TestRingBufferCapacity(t *testing.T) { buffer := newRingBuffer(5) // test - traceIDs := []pdata.TraceID{ - pdata.NewTraceID([16]byte{1, 2, 3, 4}), - pdata.NewTraceID([16]byte{2, 3, 4, 5}), - pdata.NewTraceID([16]byte{3, 4, 5, 6}), - pdata.NewTraceID([16]byte{4, 5, 6, 7}), - pdata.NewTraceID([16]byte{5, 6, 7, 8}), - pdata.NewTraceID([16]byte{6, 7, 8, 9}), + traceIDs := []pcommon.TraceID{ + pcommon.NewTraceID([16]byte{1, 2, 3, 4}), + pcommon.NewTraceID([16]byte{2, 3, 4, 5}), + pcommon.NewTraceID([16]byte{3, 4, 5, 6}), + pcommon.NewTraceID([16]byte{4, 5, 6, 7}), + pcommon.NewTraceID([16]byte{5, 6, 7, 8}), + pcommon.NewTraceID([16]byte{6, 7, 8, 9}), } for _, traceID := range traceIDs { buffer.put(traceID) @@ -51,7 +51,7 @@ func TestRingBufferCapacity(t *testing.T) { func TestDeleteFromBuffer(t *testing.T) { // prepare buffer := newRingBuffer(2) - traceID := pdata.NewTraceID([16]byte{1, 2, 3, 4}) + traceID := pcommon.NewTraceID([16]byte{1, 2, 3, 4}) buffer.put(traceID) // test @@ -65,7 +65,7 @@ func TestDeleteFromBuffer(t *testing.T) { func TestDeleteNonExistingFromBuffer(t *testing.T) { // prepare buffer := newRingBuffer(2) - traceID := pdata.NewTraceID([16]byte{1, 2, 3, 4}) + traceID := pcommon.NewTraceID([16]byte{1, 2, 3, 
4}) // test deleted := buffer.delete(traceID) diff --git a/processor/groupbytraceprocessor/storage.go b/processor/groupbytraceprocessor/storage.go index dee327af5d80..5edb784b2a63 100644 --- a/processor/groupbytraceprocessor/storage.go +++ b/processor/groupbytraceprocessor/storage.go @@ -15,7 +15,8 @@ package groupbytraceprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbytraceprocessor" import ( - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" ) // storage is an abstraction for the span storage used by the groupbytrace processor. @@ -24,15 +25,15 @@ type storage interface { // createOrAppend will check whether the given trace ID is already in the storage and // will either append the given spans to the existing record, or create a new trace with // the given spans from trace - createOrAppend(pdata.TraceID, pdata.Traces) error + createOrAppend(pcommon.TraceID, ptrace.Traces) error // get will retrieve the trace based on the given trace ID, returning nil in case a trace // cannot be found - get(pdata.TraceID) ([]pdata.ResourceSpans, error) + get(pcommon.TraceID) ([]ptrace.ResourceSpans, error) // delete will remove the trace based on the given trace ID, returning the trace that was removed, // or nil in case a trace cannot be found - delete(pdata.TraceID) ([]pdata.ResourceSpans, error) + delete(pcommon.TraceID) ([]ptrace.ResourceSpans, error) // start gives the storage the opportunity to initialize any resources or procedures start() error diff --git a/processor/groupbytraceprocessor/storage_memory.go b/processor/groupbytraceprocessor/storage_memory.go index 292bbeca63c5..f7fdad13c495 100644 --- a/processor/groupbytraceprocessor/storage_memory.go +++ b/processor/groupbytraceprocessor/storage_memory.go @@ -20,12 +20,13 @@ import ( "time" "go.opencensus.io/stats" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" ) type memoryStorage struct { sync.RWMutex - content map[pdata.TraceID][]pdata.ResourceSpans + content map[pcommon.TraceID][]ptrace.ResourceSpans stopped bool stoppedLock sync.RWMutex metricsCollectionInterval time.Duration @@ -35,19 +36,19 @@ var _ storage = (*memoryStorage)(nil) func newMemoryStorage() *memoryStorage { return &memoryStorage{ - content: make(map[pdata.TraceID][]pdata.ResourceSpans), + content: make(map[pcommon.TraceID][]ptrace.ResourceSpans), metricsCollectionInterval: time.Second, } } -func (st *memoryStorage) createOrAppend(traceID pdata.TraceID, td pdata.Traces) error { +func (st *memoryStorage) createOrAppend(traceID pcommon.TraceID, td ptrace.Traces) error { st.Lock() defer st.Unlock() // getting zero value is fine content := st.content[traceID] - newRss := pdata.NewResourceSpansSlice() + newRss := ptrace.NewResourceSpansSlice() td.ResourceSpans().CopyTo(newRss) for i := 0; i < newRss.Len(); i++ { content = append(content, newRss.At(i)) @@ -56,7 +57,7 @@ func (st *memoryStorage) createOrAppend(traceID pdata.TraceID, td pdata.Traces) return nil } -func (st *memoryStorage) get(traceID pdata.TraceID) ([]pdata.ResourceSpans, error) { +func (st *memoryStorage) get(traceID pcommon.TraceID) ([]ptrace.ResourceSpans, error) { st.RLock() rss, ok := st.content[traceID] st.RUnlock() @@ -64,9 +65,9 @@ func (st *memoryStorage) get(traceID pdata.TraceID) ([]pdata.ResourceSpans, erro return nil, nil } - var result []pdata.ResourceSpans + var 
result []ptrace.ResourceSpans for _, rs := range rss { - newRS := pdata.NewResourceSpans() + newRS := ptrace.NewResourceSpans() rs.CopyTo(newRS) result = append(result, newRS) } @@ -76,7 +77,7 @@ func (st *memoryStorage) get(traceID pdata.TraceID) ([]pdata.ResourceSpans, erro // delete will return a reference to a ResourceSpans. Changes to the returned object may not be applied // to the version in the storage. -func (st *memoryStorage) delete(traceID pdata.TraceID) ([]pdata.ResourceSpans, error) { +func (st *memoryStorage) delete(traceID pcommon.TraceID) ([]ptrace.ResourceSpans, error) { st.Lock() defer st.Unlock() diff --git a/processor/groupbytraceprocessor/storage_memory_test.go b/processor/groupbytraceprocessor/storage_memory_test.go index b98388690f1c..096d98f6b21d 100644 --- a/processor/groupbytraceprocessor/storage_memory_test.go +++ b/processor/groupbytraceprocessor/storage_memory_test.go @@ -19,19 +19,20 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" ) func TestMemoryCreateAndGetTrace(t *testing.T) { // prepare st := newMemoryStorage() - traceIDs := []pdata.TraceID{ - pdata.NewTraceID([16]byte{1, 2, 3, 4}), - pdata.NewTraceID([16]byte{2, 3, 4, 5}), + traceIDs := []pcommon.TraceID{ + pcommon.NewTraceID([16]byte{1, 2, 3, 4}), + pcommon.NewTraceID([16]byte{2, 3, 4, 5}), } - baseTrace := pdata.NewTraces() + baseTrace := ptrace.NewTraces() rss := baseTrace.ResourceSpans() rs := rss.AppendEmpty() ils := rs.ScopeSpans().AppendEmpty() @@ -46,7 +47,7 @@ func TestMemoryCreateAndGetTrace(t *testing.T) { // verify assert.Equal(t, 2, st.count()) for _, traceID := range traceIDs { - expected := []pdata.ResourceSpans{baseTrace.ResourceSpans().At(0)} + expected := []ptrace.ResourceSpans{baseTrace.ResourceSpans().At(0)} expected[0].ScopeSpans().At(0).Spans().At(0).SetTraceID(traceID) retrieved, err := st.get(traceID) @@ -61,9 +62,9 @@ func TestMemoryDeleteTrace(t *testing.T) { // prepare st := newMemoryStorage() - traceID := pdata.NewTraceID([16]byte{1, 2, 3, 4}) + traceID := pcommon.NewTraceID([16]byte{1, 2, 3, 4}) - trace := pdata.NewTraces() + trace := ptrace.NewTraces() rss := trace.ResourceSpans() rs := rss.AppendEmpty() ils := rs.ScopeSpans().AppendEmpty() @@ -77,7 +78,7 @@ func TestMemoryDeleteTrace(t *testing.T) { // verify require.NoError(t, err) - assert.Equal(t, []pdata.ResourceSpans{trace.ResourceSpans().At(0)}, deleted) + assert.Equal(t, []ptrace.ResourceSpans{trace.ResourceSpans().At(0)}, deleted) retrieved, err := st.get(traceID) require.NoError(t, err) @@ -88,30 +89,30 @@ func TestMemoryAppendSpans(t *testing.T) { // prepare st := newMemoryStorage() - traceID := pdata.NewTraceID([16]byte{1, 2, 3, 4}) + traceID := pcommon.NewTraceID([16]byte{1, 2, 3, 4}) - trace := pdata.NewTraces() + trace := ptrace.NewTraces() rss := trace.ResourceSpans() rs := rss.AppendEmpty() ils := rs.ScopeSpans().AppendEmpty() span := ils.Spans().AppendEmpty() span.SetTraceID(traceID) - span.SetSpanID(pdata.NewSpanID([8]byte{1, 2, 3, 4})) + span.SetSpanID(pcommon.NewSpanID([8]byte{1, 2, 3, 4})) st.createOrAppend(traceID, trace) - secondTrace := pdata.NewTraces() + secondTrace := ptrace.NewTraces() secondRss := secondTrace.ResourceSpans() secondRs := secondRss.AppendEmpty() secondIls := secondRs.ScopeSpans().AppendEmpty() secondSpan := secondIls.Spans().AppendEmpty() secondSpan.SetName("second-name") 
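The fixture-building pattern used in these storage tests ports mechanically: identifiers come from pcommon, payload types from ptrace. A minimal self-contained sketch of that pattern; the buildTrace helper name is illustrative.

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/ptrace"
)

// buildTrace assembles a single-span ptrace.Traces payload, mirroring the
// simpleTracesWithID-style helpers above; all of these types previously
// lived under the single pdata package.
func buildTrace(traceID pcommon.TraceID, spanID pcommon.SpanID) ptrace.Traces {
	traces := ptrace.NewTraces()
	span := traces.ResourceSpans().AppendEmpty().
		ScopeSpans().AppendEmpty().
		Spans().AppendEmpty()
	span.SetTraceID(traceID)
	span.SetSpanID(spanID)
	return traces
}

func main() {
	td := buildTrace(
		pcommon.NewTraceID([16]byte{1, 2, 3, 4}),
		pcommon.NewSpanID([8]byte{1, 2, 3, 4}),
	)
	fmt.Println("resource spans:", td.ResourceSpans().Len())
}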
secondSpan.SetTraceID(traceID) - secondSpan.SetSpanID(pdata.NewSpanID([8]byte{5, 6, 7, 8})) + secondSpan.SetSpanID(pcommon.NewSpanID([8]byte{5, 6, 7, 8})) - expected := []pdata.ResourceSpans{ - pdata.NewResourceSpans(), - pdata.NewResourceSpans(), + expected := []ptrace.ResourceSpans{ + ptrace.NewResourceSpans(), + ptrace.NewResourceSpans(), } ils.CopyTo(expected[0].ScopeSpans().AppendEmpty()) secondIls.CopyTo(expected[1].ScopeSpans().AppendEmpty()) @@ -138,15 +139,15 @@ func TestMemoryAppendSpans(t *testing.T) { func TestMemoryTraceIsBeingCloned(t *testing.T) { // prepare st := newMemoryStorage() - traceID := pdata.NewTraceID([16]byte{1, 2, 3, 4}) + traceID := pcommon.NewTraceID([16]byte{1, 2, 3, 4}) - trace := pdata.NewTraces() + trace := ptrace.NewTraces() rss := trace.ResourceSpans() rs := rss.AppendEmpty() ils := rs.ScopeSpans().AppendEmpty() span := ils.Spans().AppendEmpty() span.SetTraceID(traceID) - span.SetSpanID(pdata.NewSpanID([8]byte{1, 2, 3, 4})) + span.SetSpanID(pcommon.NewSpanID([8]byte{1, 2, 3, 4})) span.SetName("should-not-be-changed") // test diff --git a/processor/k8sattributesprocessor/go.mod b/processor/k8sattributesprocessor/go.mod index 34a2568a479b..300cb35c89d9 100644 --- a/processor/k8sattributesprocessor/go.mod +++ b/processor/k8sattributesprocessor/go.mod @@ -6,8 +6,9 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.48.0 github.com/stretchr/testify v1.7.1 go.opencensus.io v0.23.0 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/zap v1.21.0 k8s.io/api v0.23.5 k8s.io/apimachinery v0.23.5 @@ -27,7 +28,7 @@ require ( github.com/googleapis/gnostic v0.5.5 // indirect github.com/imdario/mergo v0.3.11 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/pretty v0.3.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect @@ -41,17 +42,16 @@ require ( github.com/pelletier/go-toml v1.9.4 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spf13/cast v1.4.1 // indirect github.com/spf13/pflag v1.0.5 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect - golang.org/x/net v0.0.0-20211209124913-491a49abca63 // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect - golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b // indirect + golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 // indirect + golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect golang.org/x/text v0.3.7 // indirect golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect google.golang.org/appengine v1.6.7 // indirect @@ -69,3 +69,5 @@ require ( ) replace 
github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig => ./../../internal/k8sconfig + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/processor/k8sattributesprocessor/go.sum b/processor/k8sattributesprocessor/go.sum index 5358b73b2838..f9657bc7bbe0 100644 --- a/processor/k8sattributesprocessor/go.sum +++ b/processor/k8sattributesprocessor/go.sum @@ -67,7 +67,7 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= @@ -208,7 +208,6 @@ github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3i github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw= github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= @@ -253,8 +252,8 @@ github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/X github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= @@ -340,8 +339,6 @@ github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= 
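Until go.opentelemetry.io/collector/pdata gets a tagged release, each of these go.mod files follows the same recipe: require the module with a placeholder version and pin the actual pseudo-version through a replace directive. A hedged sketch of the minimal shape, with an illustrative module path:

module github.com/example/somecomponent // illustrative path, not a real module

go 1.17

require (
	go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d
	go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000
)

replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d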
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= @@ -369,10 +366,12 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= @@ -474,8 +473,9 @@ golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20211209124913-491a49abca63 h1:iocB37TsdFuN6IBRZ+ry36wrkoV51/tl5vOWqkcPGvY= golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -553,13 +553,14 @@ 
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b h1:9zKuko04nR4gjZ4+DNjHqRlAJqbJETHwiNKDqTfOjfE= golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/processor/k8sattributesprocessor/pod_association.go b/processor/k8sattributesprocessor/pod_association.go index 027494208d71..0ced1a36df5a 100644 --- a/processor/k8sattributesprocessor/pod_association.go +++ b/processor/k8sattributesprocessor/pod_association.go @@ -20,8 +20,8 @@ import ( "strings" "go.opentelemetry.io/collector/client" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor/internal/kube" ) @@ -29,7 +29,7 @@ import ( // extractPodIds extracts IP and pod UID from attributes or request context. // It returns a value pair containing configured label and IP Address and/or Pod UID. // If empty value in return it means that attributes does not contains configured label to match resources for Pod. 
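The attribute-lookup helpers in this file change only in their types: the map and value types move from pdata to pcommon while Get, Type, and StringVal keep their shape. A minimal standalone sketch of the same guard; the stringAttr name and sample key are illustrative.

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pcommon"
)

// stringAttr returns the value of a string attribute, or "" when the key is
// absent or holds a non-string value — the same check stringAttributeFromMap
// performs in this processor.
func stringAttr(attrs pcommon.Map, key string) string {
	if val, ok := attrs.Get(key); ok && val.Type() == pcommon.ValueTypeString {
		return val.StringVal()
	}
	return ""
}

func main() {
	attrs := pcommon.NewMap()
	attrs.InsertString("k8s.pod.ip", "1.1.1.1")
	fmt.Println(stringAttr(attrs, "k8s.pod.ip")) // prints 1.1.1.1
}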
-func extractPodID(ctx context.Context, attrs pdata.Map, associations []kube.Association) (string, kube.PodIdentifier) { +func extractPodID(ctx context.Context, attrs pcommon.Map, associations []kube.Association) (string, kube.PodIdentifier) { // If pod association is not set if len(associations) == 0 { return extractPodIDNoAssociations(ctx, attrs) @@ -62,7 +62,7 @@ func extractPodID(ctx context.Context, attrs pdata.Map, associations []kube.Asso return "", "" } -func extractPodIDNoAssociations(ctx context.Context, attrs pdata.Map) (string, kube.PodIdentifier) { +func extractPodIDNoAssociations(ctx context.Context, attrs pcommon.Map) (string, kube.PodIdentifier) { var podIP, labelIP kube.PodIdentifier podIP = kube.PodIdentifier(stringAttributeFromMap(attrs, k8sIPLabelName)) if podIP != "" { @@ -117,9 +117,9 @@ func getConnectionIP(ctx context.Context) kube.PodIdentifier { } -func stringAttributeFromMap(attrs pdata.Map, key string) string { +func stringAttributeFromMap(attrs pcommon.Map, key string) string { if val, ok := attrs.Get(key); ok { - if val.Type() == pdata.ValueTypeString { + if val.Type() == pcommon.ValueTypeString { return val.StringVal() } } diff --git a/processor/k8sattributesprocessor/processor.go b/processor/k8sattributesprocessor/processor.go index 0b4d50372531..5741d26ee9fc 100644 --- a/processor/k8sattributesprocessor/processor.go +++ b/processor/k8sattributesprocessor/processor.go @@ -20,8 +20,11 @@ import ( "strconv" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.8.0" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig" @@ -73,7 +76,7 @@ func (kp *kubernetesprocessor) Shutdown(context.Context) error { } // processTraces process traces and add k8s metadata using resource IP or incoming IP as pod origin. -func (kp *kubernetesprocessor) processTraces(ctx context.Context, td pdata.Traces) (pdata.Traces, error) { +func (kp *kubernetesprocessor) processTraces(ctx context.Context, td ptrace.Traces) (ptrace.Traces, error) { rss := td.ResourceSpans() for i := 0; i < rss.Len(); i++ { kp.processResource(ctx, rss.At(i).Resource()) @@ -83,7 +86,7 @@ func (kp *kubernetesprocessor) processTraces(ctx context.Context, td pdata.Trace } // processMetrics process metrics and add k8s metadata using resource IP, hostname or incoming IP as pod origin. -func (kp *kubernetesprocessor) processMetrics(ctx context.Context, md pdata.Metrics) (pdata.Metrics, error) { +func (kp *kubernetesprocessor) processMetrics(ctx context.Context, md pmetric.Metrics) (pmetric.Metrics, error) { rm := md.ResourceMetrics() for i := 0; i < rm.Len(); i++ { kp.processResource(ctx, rm.At(i).Resource()) @@ -93,7 +96,7 @@ func (kp *kubernetesprocessor) processMetrics(ctx context.Context, md pdata.Metr } // processLogs process logs and add k8s metadata using resource IP, hostname or incoming IP as pod origin. 
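The split packages make the per-signal entry points explicit: traces, metrics, and logs each get their own payload type, while shared enrichment still operates on pcommon.Resource. A compact sketch of that shape; handleResource and the attribute it sets are illustrative, not this processor's logic.

package main

import (
	"context"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/plog"
	"go.opentelemetry.io/collector/pdata/pmetric"
	"go.opentelemetry.io/collector/pdata/ptrace"
)

// handleResource is where signal-agnostic work would live.
func handleResource(_ context.Context, res pcommon.Resource) {
	res.Attributes().InsertString("example.attribute", "value") // illustrative
}

func processTraces(ctx context.Context, td ptrace.Traces) ptrace.Traces {
	for i := 0; i < td.ResourceSpans().Len(); i++ {
		handleResource(ctx, td.ResourceSpans().At(i).Resource())
	}
	return td
}

func processMetrics(ctx context.Context, md pmetric.Metrics) pmetric.Metrics {
	for i := 0; i < md.ResourceMetrics().Len(); i++ {
		handleResource(ctx, md.ResourceMetrics().At(i).Resource())
	}
	return md
}

func processLogs(ctx context.Context, ld plog.Logs) plog.Logs {
	for i := 0; i < ld.ResourceLogs().Len(); i++ {
		handleResource(ctx, ld.ResourceLogs().At(i).Resource())
	}
	return ld
}

func main() {
	processTraces(context.Background(), ptrace.NewTraces())
	processMetrics(context.Background(), pmetric.NewMetrics())
	processLogs(context.Background(), plog.NewLogs())
}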
-func (kp *kubernetesprocessor) processLogs(ctx context.Context, ld pdata.Logs) (pdata.Logs, error) { +func (kp *kubernetesprocessor) processLogs(ctx context.Context, ld plog.Logs) (plog.Logs, error) { rl := ld.ResourceLogs() for i := 0; i < rl.Len(); i++ { kp.processResource(ctx, rl.At(i).Resource()) @@ -103,7 +106,7 @@ func (kp *kubernetesprocessor) processLogs(ctx context.Context, ld pdata.Logs) ( } // processResource adds Pod metadata tags to resource based on pod association configuration -func (kp *kubernetesprocessor) processResource(ctx context.Context, resource pdata.Resource) { +func (kp *kubernetesprocessor) processResource(ctx context.Context, resource pcommon.Resource) { podIdentifierKey, podIdentifierValue := extractPodID(ctx, resource.Attributes(), kp.podAssociations) if podIdentifierKey != "" { resource.Attributes().InsertString(podIdentifierKey, string(podIdentifierValue)) @@ -132,7 +135,7 @@ func (kp *kubernetesprocessor) processResource(ctx context.Context, resource pda } // addContainerAttributes looks if pod has any container identifiers and adds additional container attributes -func (kp *kubernetesprocessor) addContainerAttributes(attrs pdata.Map, pod *kube.Pod) { +func (kp *kubernetesprocessor) addContainerAttributes(attrs pcommon.Map, pod *kube.Pod) { containerName := stringAttributeFromMap(attrs, conventions.AttributeK8SContainerName) if containerName == "" { return @@ -171,11 +174,11 @@ func (kp *kubernetesprocessor) getAttributesForPodsNamespace(namespace string) m } // intFromAttribute extracts int value from an attribute stored as string or int -func intFromAttribute(val pdata.Value) (int, error) { +func intFromAttribute(val pcommon.Value) (int, error) { switch val.Type() { - case pdata.ValueTypeInt: + case pcommon.ValueTypeInt: return int(val.IntVal()), nil - case pdata.ValueTypeString: + case pcommon.ValueTypeString: i, err := strconv.Atoi(val.StringVal()) if err != nil { return 0, err diff --git a/processor/k8sattributesprocessor/processor_test.go b/processor/k8sattributesprocessor/processor_test.go index bfb5094ab038..a56074ac1dbf 100644 --- a/processor/k8sattributesprocessor/processor_test.go +++ b/processor/k8sattributesprocessor/processor_test.go @@ -29,8 +29,11 @@ import ( "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumertest" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.8.0" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig" @@ -149,9 +152,9 @@ func newMultiTest( func (m *multiTest) testConsume( ctx context.Context, - traces pdata.Traces, - metrics pdata.Metrics, - logs pdata.Logs, + traces ptrace.Traces, + metrics pmetric.Metrics, + logs plog.Logs, errFunc func(err error), ) { errs := []error{ @@ -191,7 +194,7 @@ func (m *multiTest) assertResourceAttributesLen(batchNo int, attrsLen int) { assert.Equal(m.t, m.nextLogs.AllLogs()[batchNo].ResourceLogs().At(0).Resource().Attributes().Len(), attrsLen) } -func (m *multiTest) assertResource(batchNum int, resourceFunc func(res pdata.Resource)) { +func (m *multiTest) assertResource(batchNum int, resourceFunc func(res pcommon.Resource)) { rss := m.nextTrace.AllTraces()[batchNum].ResourceSpans() r := rss.At(0).Resource() @@ -228,10 
+231,10 @@ func TestProcessorBadClientProvider(t *testing.T) { }, withKubeClientProvider(clientProvider)) } -type generateResourceFunc func(res pdata.Resource) +type generateResourceFunc func(res pcommon.Resource) -func generateTraces(resourceFunc ...generateResourceFunc) pdata.Traces { - t := pdata.NewTraces() +func generateTraces(resourceFunc ...generateResourceFunc) ptrace.Traces { + t := ptrace.NewTraces() rs := t.ResourceSpans().AppendEmpty() for _, resFun := range resourceFunc { res := rs.Resource() @@ -242,8 +245,8 @@ func generateTraces(resourceFunc ...generateResourceFunc) pdata.Traces { return t } -func generateMetrics(resourceFunc ...generateResourceFunc) pdata.Metrics { - m := pdata.NewMetrics() +func generateMetrics(resourceFunc ...generateResourceFunc) pmetric.Metrics { + m := pmetric.NewMetrics() ms := m.ResourceMetrics().AppendEmpty() for _, resFun := range resourceFunc { res := ms.Resource() @@ -254,8 +257,8 @@ func generateMetrics(resourceFunc ...generateResourceFunc) pdata.Metrics { return m } -func generateLogs(resourceFunc ...generateResourceFunc) pdata.Logs { - l := pdata.NewLogs() +func generateLogs(resourceFunc ...generateResourceFunc) plog.Logs { + l := plog.NewLogs() ls := l.ResourceLogs().AppendEmpty() for _, resFun := range resourceFunc { res := ls.Resource() @@ -267,31 +270,31 @@ func generateLogs(resourceFunc ...generateResourceFunc) pdata.Logs { } func withPassthroughIP(passthroughIP string) generateResourceFunc { - return func(res pdata.Resource) { + return func(res pcommon.Resource) { res.Attributes().InsertString(k8sIPLabelName, passthroughIP) } } func withHostname(hostname string) generateResourceFunc { - return func(res pdata.Resource) { + return func(res pcommon.Resource) { res.Attributes().InsertString(conventions.AttributeHostName, hostname) } } func withPodUID(uid string) generateResourceFunc { - return func(res pdata.Resource) { + return func(res pcommon.Resource) { res.Attributes().InsertString("k8s.pod.uid", uid) } } func withContainerName(containerName string) generateResourceFunc { - return func(res pdata.Resource) { + return func(res pcommon.Resource) { res.Attributes().InsertString(conventions.AttributeK8SContainerName, containerName) } } func withContainerRunID(containerRunID string) generateResourceFunc { - return func(res pdata.Resource) { + return func(res pcommon.Resource) { res.Attributes().InsertString(conventions.AttributeK8SContainerRestartCount, containerRunID) } } @@ -338,7 +341,7 @@ func TestIPDetectionFromContext(t *testing.T) { m.assertBatchesLen(1) m.assertResourceObjectLen(0) - m.assertResource(0, func(r pdata.Resource) { + m.assertResource(0, func(r pcommon.Resource) { require.Greater(t, r.Attributes().Len(), 0) assertResourceHasStringAttribute(t, r, "k8s.pod.ip", "1.1.1.1") }) @@ -350,8 +353,8 @@ func TestNilBatch(t *testing.T) { m := newMultiTest(t, NewFactory().CreateDefaultConfig(), nil) m.testConsume( context.Background(), - pdata.NewTraces(), - pdata.NewMetrics(), + ptrace.NewTraces(), + pmetric.NewMetrics(), generateLogs(), func(err error) { assert.NoError(t, err) @@ -446,7 +449,7 @@ func TestNoIP(t *testing.T) { m.assertBatchesLen(1) m.assertResourceObjectLen(0) - m.assertResource(0, func(res pdata.Resource) { + m.assertResource(0, func(res pcommon.Resource) { assert.Equal(t, 0, res.Attributes().Len()) }) } @@ -499,7 +502,7 @@ func TestIPSourceWithoutPodAssociation(t *testing.T) { metrics := generateMetrics() logs := generateLogs() - resources := []pdata.Resource{ + resources := []pcommon.Resource{ 
traces.ResourceSpans().At(0).Resource(), metrics.ResourceMetrics().At(0).Resource(), } @@ -515,7 +518,7 @@ func TestIPSourceWithoutPodAssociation(t *testing.T) { m.testConsume(ctx, traces, metrics, logs, nil) m.assertBatchesLen(i + 1) - m.assertResource(i, func(res pdata.Resource) { + m.assertResource(i, func(res pcommon.Resource) { require.Greater(t, res.Attributes().Len(), 0) assertResourceHasStringAttribute(t, res, "k8s.pod.ip", tc.out) }) @@ -581,7 +584,7 @@ func TestIPSourceWithPodAssociation(t *testing.T) { metrics := generateMetrics() logs := generateLogs() - resources := []pdata.Resource{ + resources := []pcommon.Resource{ traces.ResourceSpans().At(0).Resource(), metrics.ResourceMetrics().At(0).Resource(), logs.ResourceLogs().At(0).Resource(), @@ -593,7 +596,7 @@ func TestIPSourceWithPodAssociation(t *testing.T) { m.testConsume(ctx, traces, metrics, logs, nil) m.assertBatchesLen(i + 1) - m.assertResource(i, func(res pdata.Resource) { + m.assertResource(i, func(res pcommon.Resource) { require.Greater(t, res.Attributes().Len(), 0) assertResourceHasStringAttribute(t, res, tc.outLabel, tc.outValue) }) @@ -632,7 +635,7 @@ func TestPodUID(t *testing.T) { m.assertBatchesLen(1) m.assertResourceObjectLen(0) - m.assertResource(0, func(r pdata.Resource) { + m.assertResource(0, func(r pcommon.Resource) { require.Greater(t, r.Attributes().Len(), 0) assertResourceHasStringAttribute(t, r, "k8s.pod.uid", "ef10d10b-2da5-4030-812e-5f45c1531227") }) @@ -688,7 +691,7 @@ func TestProcessorAddLabels(t *testing.T) { m.assertBatchesLen(i + 1) m.assertResourceObjectLen(i) - m.assertResource(i, func(res pdata.Resource) { + m.assertResource(i, func(res pcommon.Resource) { require.Greater(t, res.Attributes().Len(), 0) assertResourceHasStringAttribute(t, res, "k8s.pod.ip", ip) for k, v := range attrs { @@ -831,7 +834,7 @@ func TestProcessorAddContainerAttributes(t *testing.T) { ) m.assertBatchesLen(1) - m.assertResource(0, func(r pdata.Resource) { + m.assertResource(0, func(r pcommon.Resource) { require.Equal(t, len(tt.wantAttrs), r.Attributes().Len()) for k, v := range tt.wantAttrs { assertResourceHasStringAttribute(t, r, k, v) @@ -876,7 +879,7 @@ func TestProcessorPicksUpPassthoughPodIp(t *testing.T) { m.assertResourceObjectLen(0) m.assertResourceAttributesLen(0, 3) - m.assertResource(0, func(res pdata.Resource) { + m.assertResource(0, func(res pcommon.Resource) { assertResourceHasStringAttribute(t, res, k8sIPLabelName, "2.2.2.2") assertResourceHasStringAttribute(t, res, "k", "v") assertResourceHasStringAttribute(t, res, "1", "2") @@ -1090,41 +1093,41 @@ func TestStartStop(t *testing.T) { assert.True(t, controller.HasStopped()) } -func assertResourceHasStringAttribute(t *testing.T, r pdata.Resource, k, v string) { +func assertResourceHasStringAttribute(t *testing.T, r pcommon.Resource, k, v string) { got, ok := r.Attributes().Get(k) require.True(t, ok, fmt.Sprintf("resource does not contain attribute %s", k)) - assert.EqualValues(t, pdata.ValueTypeString, got.Type(), "attribute %s is not of type string", k) + assert.EqualValues(t, pcommon.ValueTypeString, got.Type(), "attribute %s is not of type string", k) assert.EqualValues(t, v, got.StringVal(), "attribute %s is not equal to %s", k, v) } func Test_intFromAttribute(t *testing.T) { tests := []struct { name string - attrVal pdata.Value + attrVal pcommon.Value wantInt int wantErr bool }{ { name: "wrong-type", - attrVal: pdata.NewValueBool(true), + attrVal: pcommon.NewValueBool(true), wantInt: 0, wantErr: true, }, { name: "wrong-string-number", - attrVal: 
pdata.NewValueString("NaN"), + attrVal: pcommon.NewValueString("NaN"), wantInt: 0, wantErr: true, }, { name: "valid-string-number", - attrVal: pdata.NewValueString("3"), + attrVal: pcommon.NewValueString("3"), wantInt: 3, wantErr: false, }, { name: "valid-int-number", - attrVal: pdata.NewValueInt(1), + attrVal: pcommon.NewValueInt(1), wantInt: 1, wantErr: false, }, diff --git a/processor/metricsgenerationprocessor/go.mod b/processor/metricsgenerationprocessor/go.mod index 92685d851712..d9987ef3a55f 100644 --- a/processor/metricsgenerationprocessor/go.mod +++ b/processor/metricsgenerationprocessor/go.mod @@ -4,8 +4,8 @@ go 1.17 require ( github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/zap v1.21.0 ) @@ -14,7 +14,7 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/fsnotify/fsnotify v1.5.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/pretty v0.3.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect @@ -22,7 +22,6 @@ require ( github.com/pelletier/go-toml v1.9.4 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect @@ -33,3 +32,5 @@ require ( gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/processor/metricsgenerationprocessor/go.sum b/processor/metricsgenerationprocessor/go.sum index e899f5a73fe9..2b7090fb26d6 100644 --- a/processor/metricsgenerationprocessor/go.sum +++ b/processor/metricsgenerationprocessor/go.sum @@ -15,7 +15,7 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= @@ -70,7 +70,6 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/uuid v1.1.2/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -100,8 +99,8 @@ github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqx github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= @@ -148,8 +147,6 @@ github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTE github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -164,10 +161,10 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= 
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= @@ -207,7 +204,7 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -230,7 +227,7 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= diff --git a/processor/metricsgenerationprocessor/processor.go b/processor/metricsgenerationprocessor/processor.go index 61731f5e0d6f..214933c21ea9 100644 --- a/processor/metricsgenerationprocessor/processor.go +++ b/processor/metricsgenerationprocessor/processor.go @@ -18,7 +18,7 @@ import ( "context" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" ) @@ -50,7 +50,7 @@ func (mgp *metricsGenerationProcessor) Start(context.Context, component.Host) er } // processMetrics implements the ProcessMetricsFunc type. 
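The metrics test helpers below apply the same mechanical substitution: pmetric owns the metric payload types, pcommon owns timestamps. A minimal hedged sketch of building a one-point gauge; the buildGauge name is illustrative.

package main

import (
	"fmt"
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"
)

// buildGauge creates a Metrics payload with a single double gauge point,
// mirroring the generateTestMetrics helpers in the tests below.
func buildGauge(name string, value float64) pmetric.Metrics {
	md := pmetric.NewMetrics()
	m := md.ResourceMetrics().AppendEmpty().
		ScopeMetrics().AppendEmpty().
		Metrics().AppendEmpty()
	m.SetName(name)
	m.SetDataType(pmetric.MetricDataTypeGauge)
	dp := m.Gauge().DataPoints().AppendEmpty()
	dp.SetTimestamp(pcommon.NewTimestampFromTime(time.Now()))
	dp.SetDoubleVal(value)
	return md
}

func main() {
	md := buildGauge("metric_1", 100)
	fmt.Println("metrics:", md.MetricCount())
}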
-func (mgp *metricsGenerationProcessor) processMetrics(_ context.Context, md pdata.Metrics) (pdata.Metrics, error) { +func (mgp *metricsGenerationProcessor) processMetrics(_ context.Context, md pmetric.Metrics) (pmetric.Metrics, error) { resourceMetricsSlice := md.ResourceMetrics() for i := 0; i < resourceMetricsSlice.Len(); i++ { diff --git a/processor/metricsgenerationprocessor/processor_test.go b/processor/metricsgenerationprocessor/processor_test.go index 54fe87195dda..28c3e73c5536 100644 --- a/processor/metricsgenerationprocessor/processor_test.go +++ b/processor/metricsgenerationprocessor/processor_test.go @@ -24,7 +24,8 @@ import ( "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/consumer/consumertest" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" ) type testMetric struct { @@ -40,8 +41,8 @@ type testMetricIntGauge struct { type metricsGenerationTest struct { name string rules []Rule - inMetrics pdata.Metrics - outMetrics pdata.Metrics + inMetrics pmetric.Metrics + outMetrics pmetric.Metrics } var ( @@ -318,16 +319,16 @@ func TestMetricsGenerationProcessor(t *testing.T) { require.Equal(t, eM.Name(), aM.Name()) - if eM.DataType() == pdata.MetricDataTypeGauge { + if eM.DataType() == pmetric.MetricDataTypeGauge { eDataPoints := eM.Gauge().DataPoints() aDataPoints := aM.Gauge().DataPoints() require.Equal(t, eDataPoints.Len(), aDataPoints.Len()) for j := 0; j < eDataPoints.Len(); j++ { switch eDataPoints.At(j).ValueType() { - case pdata.MetricValueTypeDouble: + case pmetric.MetricValueTypeDouble: require.Equal(t, eDataPoints.At(j).DoubleVal(), aDataPoints.At(j).DoubleVal()) - case pdata.MetricValueTypeInt: + case pmetric.MetricValueTypeInt: require.Equal(t, eDataPoints.At(j).IntVal(), aDataPoints.At(j).IntVal()) } @@ -341,8 +342,8 @@ func TestMetricsGenerationProcessor(t *testing.T) { } } -func generateTestMetrics(tm testMetric) pdata.Metrics { - md := pdata.NewMetrics() +func generateTestMetrics(tm testMetric) pmetric.Metrics { + md := pmetric.NewMetrics() now := time.Now() rm := md.ResourceMetrics().AppendEmpty() @@ -350,10 +351,10 @@ func generateTestMetrics(tm testMetric) pdata.Metrics { for i, name := range tm.metricNames { m := ms.AppendEmpty() m.SetName(name) - m.SetDataType(pdata.MetricDataTypeGauge) + m.SetDataType(pmetric.MetricDataTypeGauge) for _, value := range tm.metricValues[i] { dp := m.Gauge().DataPoints().AppendEmpty() - dp.SetTimestamp(pdata.NewTimestampFromTime(now.Add(10 * time.Second))) + dp.SetTimestamp(pcommon.NewTimestampFromTime(now.Add(10 * time.Second))) dp.SetDoubleVal(value) } } @@ -361,8 +362,8 @@ func generateTestMetrics(tm testMetric) pdata.Metrics { return md } -func generateTestMetricsWithIntDatapoint(tm testMetricIntGauge) pdata.Metrics { - md := pdata.NewMetrics() +func generateTestMetricsWithIntDatapoint(tm testMetricIntGauge) pmetric.Metrics { + md := pmetric.NewMetrics() now := time.Now() rm := md.ResourceMetrics().AppendEmpty() @@ -370,10 +371,10 @@ func generateTestMetricsWithIntDatapoint(tm testMetricIntGauge) pdata.Metrics { for i, name := range tm.metricNames { m := ms.AppendEmpty() m.SetName(name) - m.SetDataType(pdata.MetricDataTypeGauge) + m.SetDataType(pmetric.MetricDataTypeGauge) for _, value := range tm.metricValues[i] { dp := m.Gauge().DataPoints().AppendEmpty() - dp.SetTimestamp(pdata.NewTimestampFromTime(now.Add(10 * time.Second))) + 
dp.SetTimestamp(pcommon.NewTimestampFromTime(now.Add(10 * time.Second))) dp.SetIntVal(value) } } @@ -381,14 +382,14 @@ func generateTestMetricsWithIntDatapoint(tm testMetricIntGauge) pdata.Metrics { return md } -func getOutputForIntGaugeTest() pdata.Metrics { +func getOutputForIntGaugeTest() pmetric.Metrics { intGaugeOutputMetrics := generateTestMetricsWithIntDatapoint(testMetricIntGauge{ metricNames: []string{"metric_1", "metric_2"}, metricValues: [][]int64{{100}, {5}}, }) ilm := intGaugeOutputMetrics.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics() doubleMetric := ilm.AppendEmpty() - doubleMetric.SetDataType(pdata.MetricDataTypeGauge) + doubleMetric.SetDataType(pmetric.MetricDataTypeGauge) doubleMetric.SetName("metric_calculated") neweDoubleDataPoint := doubleMetric.Gauge().DataPoints().AppendEmpty() neweDoubleDataPoint.SetDoubleVal(105) diff --git a/processor/metricsgenerationprocessor/utils.go b/processor/metricsgenerationprocessor/utils.go index 42414d4d5700..61aaf5f30b98 100644 --- a/processor/metricsgenerationprocessor/utils.go +++ b/processor/metricsgenerationprocessor/utils.go @@ -15,13 +15,13 @@ package metricsgenerationprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricsgenerationprocessor" import ( - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" ) -func getNameToMetricMap(rm pdata.ResourceMetrics) map[string]pdata.Metric { +func getNameToMetricMap(rm pmetric.ResourceMetrics) map[string]pmetric.Metric { ilms := rm.ScopeMetrics() - metricMap := make(map[string]pdata.Metric) + metricMap := make(map[string]pmetric.Metric) for i := 0; i < ilms.Len(); i++ { ilm := ilms.At(i) @@ -35,14 +35,14 @@ func getNameToMetricMap(rm pdata.ResourceMetrics) map[string]pdata.Metric { } // getMetricValue returns the value of the first data point from the given metric. -func getMetricValue(metric pdata.Metric) float64 { - if metric.DataType() == pdata.MetricDataTypeGauge { +func getMetricValue(metric pmetric.Metric) float64 { + if metric.DataType() == pmetric.MetricDataTypeGauge { dataPoints := metric.Gauge().DataPoints() if dataPoints.Len() > 0 { switch dataPoints.At(0).ValueType() { - case pdata.MetricValueTypeDouble: + case pmetric.MetricValueTypeDouble: return dataPoints.At(0).DoubleVal() - case pdata.MetricValueTypeInt: + case pmetric.MetricValueTypeInt: return float64(dataPoints.At(0).IntVal()) } } @@ -54,7 +54,7 @@ func getMetricValue(metric pdata.Metric) float64 { // generateMetrics creates a new metric based on the given rule and add it to the Resource Metric. // The value for newly calculated metrics is always a floting point number and the dataType is set // as MetricDataTypeDoubleGauge. 
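The utils.go changes below keep the same value handling while renaming the types: metric value kinds move from pdata.MetricValueType* to pmetric.MetricValueType*, and timestamps move to pcommon. A self-contained sketch of that pattern (illustrative only, not part of the patch; firstGaugeValue is a hypothetical stand-in for the patched getMetricValue):

package main

import (
	"fmt"
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"
)

// firstGaugeValue mirrors the value handling used by the patched getMetricValue:
// gauge data points carry either a double or an int payload, and the constants
// now come from pmetric rather than the old pdata package.
func firstGaugeValue(metric pmetric.Metric) float64 {
	if metric.DataType() != pmetric.MetricDataTypeGauge {
		return 0
	}
	dps := metric.Gauge().DataPoints()
	if dps.Len() == 0 {
		return 0
	}
	switch dps.At(0).ValueType() {
	case pmetric.MetricValueTypeDouble:
		return dps.At(0).DoubleVal()
	case pmetric.MetricValueTypeInt:
		return float64(dps.At(0).IntVal())
	}
	return 0
}

func main() {
	// Build a gauge the way the patched test helpers do: the metric itself comes
	// from pmetric, while the timestamp type comes from pcommon.
	m := pmetric.NewMetrics().ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics().AppendEmpty()
	m.SetName("example.gauge")
	m.SetDataType(pmetric.MetricDataTypeGauge)
	dp := m.Gauge().DataPoints().AppendEmpty()
	dp.SetTimestamp(pcommon.NewTimestampFromTime(time.Now()))
	dp.SetIntVal(42)

	fmt.Println(firstGaugeValue(m)) // 42
}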
-func generateMetrics(rm pdata.ResourceMetrics, operand2 float64, rule internalRule, logger *zap.Logger) { +func generateMetrics(rm pmetric.ResourceMetrics, operand2 float64, rule internalRule, logger *zap.Logger) { ilms := rm.ScopeMetrics() for i := 0; i < ilms.Len(); i++ { ilm := ilms.At(i) @@ -63,22 +63,22 @@ func generateMetrics(rm pdata.ResourceMetrics, operand2 float64, rule internalRu metric := metricSlice.At(j) if metric.Name() == rule.metric1 { newMetric := appendMetric(ilm, rule.name, rule.unit) - newMetric.SetDataType(pdata.MetricDataTypeGauge) + newMetric.SetDataType(pmetric.MetricDataTypeGauge) addDoubleGaugeDataPoints(metric, newMetric, operand2, rule.operation, logger) } } } } -func addDoubleGaugeDataPoints(from pdata.Metric, to pdata.Metric, operand2 float64, operation string, logger *zap.Logger) { +func addDoubleGaugeDataPoints(from pmetric.Metric, to pmetric.Metric, operand2 float64, operation string, logger *zap.Logger) { dataPoints := from.Gauge().DataPoints() for i := 0; i < dataPoints.Len(); i++ { fromDataPoint := dataPoints.At(i) var operand1 float64 switch fromDataPoint.ValueType() { - case pdata.MetricValueTypeDouble: + case pmetric.MetricValueTypeDouble: operand1 = fromDataPoint.DoubleVal() - case pdata.MetricValueTypeInt: + case pmetric.MetricValueTypeInt: operand1 = float64(fromDataPoint.IntVal()) } @@ -89,7 +89,7 @@ func addDoubleGaugeDataPoints(from pdata.Metric, to pdata.Metric, operand2 float } } -func appendMetric(ilm pdata.ScopeMetrics, name, unit string) pdata.Metric { +func appendMetric(ilm pmetric.ScopeMetrics, name, unit string) pmetric.Metric { metric := ilm.Metrics().AppendEmpty() metric.SetName(name) metric.SetUnit(unit) diff --git a/processor/metricsgenerationprocessor/utils_test.go b/processor/metricsgenerationprocessor/utils_test.go index 001c6b9b69ce..aaa38ac2346c 100644 --- a/processor/metricsgenerationprocessor/utils_test.go +++ b/processor/metricsgenerationprocessor/utils_test.go @@ -18,7 +18,7 @@ import ( "testing" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" ) @@ -49,13 +49,13 @@ func TestCalculateValue(t *testing.T) { } func TestGetMetricValueWithNoDataPoint(t *testing.T) { - md := pdata.NewMetrics() + md := pmetric.NewMetrics() rm := md.ResourceMetrics().AppendEmpty() ms := rm.ScopeMetrics().AppendEmpty().Metrics() m := ms.AppendEmpty() m.SetName("metric_1") - m.SetDataType(pdata.MetricDataTypeGauge) + m.SetDataType(pmetric.MetricDataTypeGauge) value := getMetricValue(md.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0)) require.Equal(t, 0.0, value) diff --git a/processor/metricstransformprocessor/go.mod b/processor/metricstransformprocessor/go.mod index 6820d24c512b..59b198acad78 100644 --- a/processor/metricstransformprocessor/go.mod +++ b/processor/metricstransformprocessor/go.mod @@ -7,8 +7,8 @@ require ( github.com/google/go-cmp v0.5.7 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus v0.48.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/zap v1.21.0 google.golang.org/protobuf v1.28.0 @@ -20,21 +20,21 @@ require ( github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf 
v1.5.2 // indirect github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.48.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opencensus.io v0.23.0 // indirect + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect - golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect + golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 // indirect golang.org/x/text v0.3.7 // indirect golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect @@ -46,3 +46,5 @@ require ( replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus => ../../pkg/translator/opencensus replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal => ../../internal/coreinternal + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/processor/metricstransformprocessor/go.sum b/processor/metricstransformprocessor/go.sum index 552103ace226..6577af7da9d7 100644 --- a/processor/metricstransformprocessor/go.sum +++ b/processor/metricstransformprocessor/go.sum @@ -18,7 +18,7 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.3.0 h1:t/LhUZLVitR1Ow2YOnduCsavhwFUklBMoGVYUCqmCqk= github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -91,7 +91,6 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= 
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -123,8 +122,8 @@ github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqx github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -167,8 +166,6 @@ github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -184,10 +181,12 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod 
h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= @@ -230,8 +229,8 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -255,8 +254,8 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= diff --git a/processor/metricstransformprocessor/metrics_transform_processor.go b/processor/metricstransformprocessor/metrics_transform_processor.go index abbb6289ca76..0b5c1275d140 100644 --- a/processor/metricstransformprocessor/metrics_transform_processor.go +++ b/processor/metricstransformprocessor/metrics_transform_processor.go @@ -25,7 +25,7 @@ import ( agentmetricspb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1" metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1" resourcepb "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/types/known/timestamppb" @@ -210,11 +210,11 @@ func 
newMetricsTransformProcessor(logger *zap.Logger, internalTransforms []inter } // processMetrics implements the ProcessMetricsFunc type. -func (mtp *metricsTransformProcessor) processMetrics(_ context.Context, md pdata.Metrics) (pdata.Metrics, error) { +func (mtp *metricsTransformProcessor) processMetrics(_ context.Context, md pmetric.Metrics) (pmetric.Metrics, error) { rms := md.ResourceMetrics() groupedMds := make([]*agentmetricspb.ExportMetricsServiceRequest, 0) - out := pdata.NewMetrics() + out := pmetric.NewMetrics() for i := 0; i < rms.Len(); i++ { node, resource, metrics := internaldata.ResourceMetricsToOC(rms.At(i)) diff --git a/processor/probabilisticsamplerprocessor/go.mod b/processor/probabilisticsamplerprocessor/go.mod index 3fe322788d94..12a5dde16318 100644 --- a/processor/probabilisticsamplerprocessor/go.mod +++ b/processor/probabilisticsamplerprocessor/go.mod @@ -5,21 +5,21 @@ go 1.17 require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.48.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 ) require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/text v0.2.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rogpeppe/go-internal v1.6.1 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect @@ -32,3 +32,5 @@ require ( ) replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal => ../../internal/coreinternal + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/processor/probabilisticsamplerprocessor/go.sum b/processor/probabilisticsamplerprocessor/go.sum index da753b174779..30f30ad568e9 100644 --- a/processor/probabilisticsamplerprocessor/go.sum +++ b/processor/probabilisticsamplerprocessor/go.sum @@ -15,7 +15,7 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod 
h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= @@ -69,7 +69,6 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -99,8 +98,8 @@ github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqx github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -143,8 +142,6 @@ github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTE github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -159,10 +156,12 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= 
+go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= @@ -202,7 +201,7 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -224,7 +223,7 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= diff --git a/processor/probabilisticsamplerprocessor/probabilisticsampler.go b/processor/probabilisticsamplerprocessor/probabilisticsampler.go index ddfbe602faa1..f325419f72f7 100644 --- a/processor/probabilisticsamplerprocessor/probabilisticsampler.go +++ b/processor/probabilisticsamplerprocessor/probabilisticsampler.go @@ -20,7 +20,8 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "go.opentelemetry.io/collector/processor/processorhelper" ) @@ -69,10 +70,10 @@ func newTracesProcessor(nextConsumer consumer.Traces, cfg *Config) 
(component.Tr processorhelper.WithCapabilities(consumer.Capabilities{MutatesData: true})) } -func (tsp *tracesamplerprocessor) processTraces(_ context.Context, td pdata.Traces) (pdata.Traces, error) { - td.ResourceSpans().RemoveIf(func(rs pdata.ResourceSpans) bool { - rs.ScopeSpans().RemoveIf(func(ils pdata.ScopeSpans) bool { - ils.Spans().RemoveIf(func(s pdata.Span) bool { +func (tsp *tracesamplerprocessor) processTraces(_ context.Context, td ptrace.Traces) (ptrace.Traces, error) { + td.ResourceSpans().RemoveIf(func(rs ptrace.ResourceSpans) bool { + rs.ScopeSpans().RemoveIf(func(ils ptrace.ScopeSpans) bool { + ils.Spans().RemoveIf(func(s ptrace.Span) bool { sp := parseSpanSamplingPriority(s) if sp == doNotSampleSpan { // The OpenTelemetry mentions this as a "hint" we take a stronger @@ -105,7 +106,7 @@ func (tsp *tracesamplerprocessor) processTraces(_ context.Context, td pdata.Trac // decide if the span should be sampled or not. The usage of the tag follows the // OpenTracing semantic tags: // https://github.com/opentracing/specification/blob/main/semantic_conventions.md#span-tags-table -func parseSpanSamplingPriority(span pdata.Span) samplingPriority { +func parseSpanSamplingPriority(span ptrace.Span) samplingPriority { attribMap := span.Attributes() if attribMap.Len() <= 0 { return deferDecision @@ -124,21 +125,21 @@ func parseSpanSamplingPriority(span pdata.Span) samplingPriority { // client libraries it is also possible that the type was lost in translation // between different formats. switch samplingPriorityAttrib.Type() { - case pdata.ValueTypeInt: + case pcommon.ValueTypeInt: value := samplingPriorityAttrib.IntVal() if value == 0 { decision = doNotSampleSpan } else if value > 0 { decision = mustSampleSpan } - case pdata.ValueTypeDouble: + case pcommon.ValueTypeDouble: value := samplingPriorityAttrib.DoubleVal() if value == 0.0 { decision = doNotSampleSpan } else if value > 0.0 { decision = mustSampleSpan } - case pdata.ValueTypeString: + case pcommon.ValueTypeString: attribVal := samplingPriorityAttrib.StringVal() if value, err := strconv.ParseFloat(attribVal, 64); err == nil { if value == 0.0 { diff --git a/processor/probabilisticsamplerprocessor/probabilisticsampler_test.go b/processor/probabilisticsamplerprocessor/probabilisticsampler_test.go index b3d84475b12f..a67b04bee53e 100644 --- a/processor/probabilisticsamplerprocessor/probabilisticsampler_test.go +++ b/processor/probabilisticsamplerprocessor/probabilisticsampler_test.go @@ -25,8 +25,9 @@ import ( "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumertest" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/idutils" ) @@ -221,15 +222,15 @@ func Test_tracesamplerprocessor_SamplingPercentageRange_MultipleResourceSpans(t // Test_tracesamplerprocessor_SpanSamplingPriority checks if handling of "sampling.priority" is correct. 
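parseSpanSamplingPriority above now switches on pcommon.ValueType* constants instead of the old pdata.ValueType* ones, and span attributes are pcommon.Value. A standalone sketch of the same attribute-inspection pattern (illustrative only; samplingPriorityOf is a hypothetical helper, not code from this patch):

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/ptrace"
)

// samplingPriorityOf mirrors the shape of the patched parseSpanSamplingPriority:
// look up a span attribute and branch on its pcommon value kind.
func samplingPriorityOf(span ptrace.Span) (float64, bool) {
	attr, ok := span.Attributes().Get("sampling.priority")
	if !ok {
		return 0, false
	}
	switch attr.Type() {
	case pcommon.ValueTypeInt:
		return float64(attr.IntVal()), true
	case pcommon.ValueTypeDouble:
		return attr.DoubleVal(), true
	default:
		return 0, false
	}
}

func main() {
	span := ptrace.NewSpan()
	span.Attributes().Insert("sampling.priority", pcommon.NewValueInt(2))
	fmt.Println(samplingPriorityOf(span)) // 2 true
}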
func Test_tracesamplerprocessor_SpanSamplingPriority(t *testing.T) { - singleSpanWithAttrib := func(key string, attribValue pdata.Value) pdata.Traces { - traces := pdata.NewTraces() + singleSpanWithAttrib := func(key string, attribValue pcommon.Value) ptrace.Traces { + traces := ptrace.NewTraces() initSpanWithAttributes(key, attribValue, traces.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty().Spans().AppendEmpty()) return traces } tests := []struct { name string cfg *Config - td pdata.Traces + td ptrace.Traces sampled bool }{ { @@ -240,7 +241,7 @@ func Test_tracesamplerprocessor_SpanSamplingPriority(t *testing.T) { }, td: singleSpanWithAttrib( "sampling.priority", - pdata.NewValueInt(2)), + pcommon.NewValueInt(2)), sampled: true, }, { @@ -251,7 +252,7 @@ func Test_tracesamplerprocessor_SpanSamplingPriority(t *testing.T) { }, td: singleSpanWithAttrib( "sampling.priority", - pdata.NewValueDouble(1)), + pcommon.NewValueDouble(1)), sampled: true, }, { @@ -262,7 +263,7 @@ func Test_tracesamplerprocessor_SpanSamplingPriority(t *testing.T) { }, td: singleSpanWithAttrib( "sampling.priority", - pdata.NewValueString("1")), + pcommon.NewValueString("1")), sampled: true, }, { @@ -273,7 +274,7 @@ func Test_tracesamplerprocessor_SpanSamplingPriority(t *testing.T) { }, td: singleSpanWithAttrib( "sampling.priority", - pdata.NewValueInt(0)), + pcommon.NewValueInt(0)), }, { name: "must_not_sample_double", @@ -283,7 +284,7 @@ func Test_tracesamplerprocessor_SpanSamplingPriority(t *testing.T) { }, td: singleSpanWithAttrib( "sampling.priority", - pdata.NewValueDouble(0)), + pcommon.NewValueDouble(0)), }, { name: "must_not_sample_string", @@ -293,7 +294,7 @@ func Test_tracesamplerprocessor_SpanSamplingPriority(t *testing.T) { }, td: singleSpanWithAttrib( "sampling.priority", - pdata.NewValueString("0")), + pcommon.NewValueString("0")), }, { name: "defer_sample_expect_not_sampled", @@ -303,7 +304,7 @@ func Test_tracesamplerprocessor_SpanSamplingPriority(t *testing.T) { }, td: singleSpanWithAttrib( "no.sampling.priority", - pdata.NewValueInt(2)), + pcommon.NewValueInt(2)), }, { name: "defer_sample_expect_sampled", @@ -313,7 +314,7 @@ func Test_tracesamplerprocessor_SpanSamplingPriority(t *testing.T) { }, td: singleSpanWithAttrib( "no.sampling.priority", - pdata.NewValueInt(2)), + pcommon.NewValueInt(2)), sampled: true, }, } @@ -343,72 +344,72 @@ func Test_tracesamplerprocessor_SpanSamplingPriority(t *testing.T) { func Test_parseSpanSamplingPriority(t *testing.T) { tests := []struct { name string - span pdata.Span + span ptrace.Span want samplingPriority }{ { name: "nil_span", - span: pdata.NewSpan(), + span: ptrace.NewSpan(), want: deferDecision, }, { name: "nil_attributes", - span: pdata.NewSpan(), + span: ptrace.NewSpan(), want: deferDecision, }, { name: "no_sampling_priority", - span: getSpanWithAttributes("key", pdata.NewValueBool(true)), + span: getSpanWithAttributes("key", pcommon.NewValueBool(true)), want: deferDecision, }, { name: "sampling_priority_int_zero", - span: getSpanWithAttributes("sampling.priority", pdata.NewValueInt(0)), + span: getSpanWithAttributes("sampling.priority", pcommon.NewValueInt(0)), want: doNotSampleSpan, }, { name: "sampling_priority_int_gt_zero", - span: getSpanWithAttributes("sampling.priority", pdata.NewValueInt(1)), + span: getSpanWithAttributes("sampling.priority", pcommon.NewValueInt(1)), want: mustSampleSpan, }, { name: "sampling_priority_int_lt_zero", - span: getSpanWithAttributes("sampling.priority", pdata.NewValueInt(-1)), + span: 
getSpanWithAttributes("sampling.priority", pcommon.NewValueInt(-1)), want: deferDecision, }, { name: "sampling_priority_double_zero", - span: getSpanWithAttributes("sampling.priority", pdata.NewValueDouble(0)), + span: getSpanWithAttributes("sampling.priority", pcommon.NewValueDouble(0)), want: doNotSampleSpan, }, { name: "sampling_priority_double_gt_zero", - span: getSpanWithAttributes("sampling.priority", pdata.NewValueDouble(1)), + span: getSpanWithAttributes("sampling.priority", pcommon.NewValueDouble(1)), want: mustSampleSpan, }, { name: "sampling_priority_double_lt_zero", - span: getSpanWithAttributes("sampling.priority", pdata.NewValueDouble(-1)), + span: getSpanWithAttributes("sampling.priority", pcommon.NewValueDouble(-1)), want: deferDecision, }, { name: "sampling_priority_string_zero", - span: getSpanWithAttributes("sampling.priority", pdata.NewValueString("0.0")), + span: getSpanWithAttributes("sampling.priority", pcommon.NewValueString("0.0")), want: doNotSampleSpan, }, { name: "sampling_priority_string_gt_zero", - span: getSpanWithAttributes("sampling.priority", pdata.NewValueString("0.5")), + span: getSpanWithAttributes("sampling.priority", pcommon.NewValueString("0.5")), want: mustSampleSpan, }, { name: "sampling_priority_string_lt_zero", - span: getSpanWithAttributes("sampling.priority", pdata.NewValueString("-0.5")), + span: getSpanWithAttributes("sampling.priority", pcommon.NewValueString("-0.5")), want: deferDecision, }, { name: "sampling_priority_string_NaN", - span: getSpanWithAttributes("sampling.priority", pdata.NewValueString("NaN")), + span: getSpanWithAttributes("sampling.priority", pcommon.NewValueString("NaN")), want: deferDecision, }, } @@ -419,13 +420,13 @@ func Test_parseSpanSamplingPriority(t *testing.T) { } } -func getSpanWithAttributes(key string, value pdata.Value) pdata.Span { - span := pdata.NewSpan() +func getSpanWithAttributes(key string, value pcommon.Value) ptrace.Span { + span := ptrace.NewSpan() initSpanWithAttributes(key, value, span) return span } -func initSpanWithAttributes(key string, value pdata.Value, dest pdata.Span) { +func initSpanWithAttributes(key string, value pcommon.Value, dest ptrace.Span) { dest.SetName("spanName") dest.Attributes().Clear() dest.Attributes().Insert(key, value) @@ -448,14 +449,14 @@ func Test_hash(t *testing.T) { } } -// genRandomTestData generates a slice of pdata.Traces with the numBatches elements which one with +// genRandomTestData generates a slice of ptrace.Traces with the numBatches elements which one with // numTracesPerBatch spans (ie.: each span has a different trace ID). All spans belong to the specified // serviceName. -func genRandomTestData(numBatches, numTracesPerBatch int, serviceName string, resourceSpanCount int) (tdd []pdata.Traces) { +func genRandomTestData(numBatches, numTracesPerBatch int, serviceName string, resourceSpanCount int) (tdd []ptrace.Traces) { r := rand.New(rand.NewSource(1)) - var traceBatches []pdata.Traces + var traceBatches []ptrace.Traces for i := 0; i < numBatches; i++ { - traces := pdata.NewTraces() + traces := ptrace.NewTraces() traces.ResourceSpans().EnsureCapacity(resourceSpanCount) for j := 0; j < resourceSpanCount; j++ { rs := traces.ResourceSpans().AppendEmpty() @@ -482,7 +483,7 @@ func genRandomTestData(numBatches, numTracesPerBatch int, serviceName string, re // assertSampledData checks for no repeated traceIDs and counts the number of spans on the sampled data for // the given service. 
-func assertSampledData(t *testing.T, sampled []pdata.Traces, serviceName string) (traceIDs map[[16]byte]bool, spanCount int) { +func assertSampledData(t *testing.T, sampled []ptrace.Traces, serviceName string) (traceIDs map[[16]byte]bool, spanCount int) { traceIDs = make(map[[16]byte]bool) for _, td := range sampled { rspans := td.ResourceSpans() diff --git a/processor/redactionprocessor/go.mod b/processor/redactionprocessor/go.mod index 7d86e8c7829f..a45c441c5891 100644 --- a/processor/redactionprocessor/go.mod +++ b/processor/redactionprocessor/go.mod @@ -4,8 +4,8 @@ go 1.17 require ( github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/zap v1.21.0 ) @@ -14,7 +14,7 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/fsnotify/fsnotify v1.5.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/pretty v0.3.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect @@ -22,7 +22,6 @@ require ( github.com/pelletier/go-toml v1.9.4 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect @@ -33,3 +32,5 @@ require ( gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/processor/redactionprocessor/go.sum b/processor/redactionprocessor/go.sum index 0ddb36bf0794..dae3dc51a76d 100644 --- a/processor/redactionprocessor/go.sum +++ b/processor/redactionprocessor/go.sum @@ -16,7 +16,7 @@ github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZx github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= @@ -71,7 +71,6 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 
h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -101,8 +100,8 @@ github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqx github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= @@ -149,8 +148,6 @@ github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTE github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -165,10 +162,10 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 
h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= @@ -208,7 +205,7 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -231,7 +228,7 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= diff --git a/processor/redactionprocessor/processor.go b/processor/redactionprocessor/processor.go index 346e5e79cde9..a01fd03ada7e 100644 --- a/processor/redactionprocessor/processor.go +++ b/processor/redactionprocessor/processor.go @@ -23,7 +23,8 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap" ) @@ -62,7 +63,7 @@ func newRedaction(ctx context.Context, config *Config, logger *zap.Logger, next // processTraces implements ProcessMetricsFunc. It processes the incoming data // and returns the data to be sent to the next component -func (s *redaction) processTraces(ctx context.Context, batch pdata.Traces) (pdata.Traces, error) { +func (s *redaction) processTraces(ctx context.Context, batch ptrace.Traces) (ptrace.Traces, error) { for i := 0; i < batch.ResourceSpans().Len(); i++ { rs := batch.ResourceSpans().At(i) s.processResourceSpan(ctx, rs) @@ -72,7 +73,7 @@ func (s *redaction) processTraces(ctx context.Context, batch pdata.Traces) (pdat // processResourceSpan processes the RS and all of its spans and then returns the last // view metric context. 
The context can be used for tests -func (s *redaction) processResourceSpan(ctx context.Context, rs pdata.ResourceSpans) { +func (s *redaction) processResourceSpan(ctx context.Context, rs ptrace.ResourceSpans) { rsAttrs := rs.Resource().Attributes() // Attributes can be part of a resource span @@ -91,7 +92,7 @@ func (s *redaction) processResourceSpan(ctx context.Context, rs pdata.ResourceSp } // processAttrs redacts the attributes of a resource span or a span -func (s *redaction) processAttrs(_ context.Context, attributes *pdata.Map) { +func (s *redaction) processAttrs(_ context.Context, attributes *pcommon.Map) { // TODO: Use the context for recording metrics var toDelete []string var toBlock []string @@ -104,7 +105,7 @@ func (s *redaction) processAttrs(_ context.Context, attributes *pdata.Map) { // This sequence satisfies these performance constraints: // - Only range through all attributes once // - Don't mask any values if the whole attribute is slated for deletion - attributes.Range(func(k string, value pdata.Value) bool { + attributes.Range(func(k string, value pcommon.Value) bool { // Make a list of attribute keys to redact if _, allowed := s.allowList[k]; !allowed { toDelete = append(toDelete, k) @@ -120,7 +121,7 @@ func (s *redaction) processAttrs(_ context.Context, attributes *pdata.Map) { valueCopy := value.StringVal() maskedValue := compiledRE.ReplaceAllString(valueCopy, "****") - attributes.Update(k, pdata.NewValueString(maskedValue)) + attributes.Update(k, pcommon.NewValueString(maskedValue)) } } return true @@ -136,7 +137,7 @@ func (s *redaction) processAttrs(_ context.Context, attributes *pdata.Map) { } // ConsumeTraces implements the SpanProcessor interface -func (s *redaction) ConsumeTraces(ctx context.Context, batch pdata.Traces) error { +func (s *redaction) ConsumeTraces(ctx context.Context, batch ptrace.Traces) error { batch, err := s.processTraces(ctx, batch) if err != nil { return err @@ -147,7 +148,7 @@ func (s *redaction) ConsumeTraces(ctx context.Context, batch pdata.Traces) error } // summarizeRedactedSpan adds diagnostic information about redacted attribute keys -func (s *redaction) summarizeRedactedSpan(toDelete []string, attributes *pdata.Map) { +func (s *redaction) summarizeRedactedSpan(toDelete []string, attributes *pcommon.Map) { redactedSpanCount := int64(len(toDelete)) if redactedSpanCount == 0 { return @@ -155,15 +156,15 @@ func (s *redaction) summarizeRedactedSpan(toDelete []string, attributes *pdata.M // Record summary as span attributes if s.config.Summary == debug { sort.Strings(toDelete) - attributes.Insert(redactedKeys, pdata.NewValueString(strings.Join(toDelete, ","))) + attributes.Insert(redactedKeys, pcommon.NewValueString(strings.Join(toDelete, ","))) } if s.config.Summary == info || s.config.Summary == debug { - attributes.Insert(redactedKeyCount, pdata.NewValueInt(redactedSpanCount)) + attributes.Insert(redactedKeyCount, pcommon.NewValueInt(redactedSpanCount)) } } // summarizeMaskedSpan adds diagnostic information about masked attribute values -func (s *redaction) summarizeMaskedSpan(toBlock []string, attributes *pdata.Map) { +func (s *redaction) summarizeMaskedSpan(toBlock []string, attributes *pcommon.Map) { maskedSpanCount := int64(len(toBlock)) if maskedSpanCount == 0 { return @@ -171,10 +172,10 @@ func (s *redaction) summarizeMaskedSpan(toBlock []string, attributes *pdata.Map) // Records summary as span attributes if s.config.Summary == debug { sort.Strings(toBlock) - attributes.Insert(maskedValues, 
pdata.NewValueString(strings.Join(toBlock, ","))) + attributes.Insert(maskedValues, pcommon.NewValueString(strings.Join(toBlock, ","))) } if s.config.Summary == info || s.config.Summary == debug { - attributes.Insert(maskedValueCount, pdata.NewValueInt(maskedSpanCount)) + attributes.Insert(maskedValueCount, pcommon.NewValueInt(maskedSpanCount)) } } diff --git a/processor/redactionprocessor/processor_test.go b/processor/redactionprocessor/processor_test.go index 82e0b43ec196..889e5c3be82d 100644 --- a/processor/redactionprocessor/processor_test.go +++ b/processor/redactionprocessor/processor_test.go @@ -23,7 +23,8 @@ import ( "github.com/stretchr/testify/assert" "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/consumer/consumertest" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap/zaptest" ) @@ -56,13 +57,13 @@ func TestRedactUnknownAttributes(t *testing.T) { config := &Config{ AllowedKeys: []string{"group", "id", "name"}, } - allowed := map[string]pdata.Value{ - "group": pdata.NewValueString("temporary"), - "id": pdata.NewValueInt(5), - "name": pdata.NewValueString("placeholder"), + allowed := map[string]pcommon.Value{ + "group": pcommon.NewValueString("temporary"), + "id": pcommon.NewValueInt(5), + "name": pcommon.NewValueString("placeholder"), } - redacted := map[string]pdata.Value{ - "credit_card": pdata.NewValueString("4111111111111111"), + redacted := map[string]pcommon.Value{ + "credit_card": pcommon.NewValueString("4111111111111111"), } library, span, next := runTest(t, allowed, redacted, nil, config) @@ -93,16 +94,16 @@ func TestRedactSummaryDebug(t *testing.T) { BlockedValues: []string{"4[0-9]{12}(?:[0-9]{3})?"}, Summary: "debug", } - allowed := map[string]pdata.Value{ - "id": pdata.NewValueInt(5), - "group.id": pdata.NewValueString("some.valid.id"), - "member (id)": pdata.NewValueString("some other valid id"), + allowed := map[string]pcommon.Value{ + "id": pcommon.NewValueInt(5), + "group.id": pcommon.NewValueString("some.valid.id"), + "member (id)": pcommon.NewValueString("some other valid id"), } - masked := map[string]pdata.Value{ - "name": pdata.NewValueString("placeholder 4111111111111111"), + masked := map[string]pcommon.Value{ + "name": pcommon.NewValueString("placeholder 4111111111111111"), } - redacted := map[string]pdata.Value{ - "credit_card": pdata.NewValueString("4111111111111111"), + redacted := map[string]pcommon.Value{ + "credit_card": pcommon.NewValueString("4111111111111111"), } _, _, next := runTest(t, allowed, redacted, masked, config) @@ -143,14 +144,14 @@ func TestRedactSummaryInfo(t *testing.T) { AllowedKeys: []string{"id", "name", "group"}, BlockedValues: []string{"4[0-9]{12}(?:[0-9]{3})?"}, Summary: "info"} - allowed := map[string]pdata.Value{ - "id": pdata.NewValueInt(5), + allowed := map[string]pcommon.Value{ + "id": pcommon.NewValueInt(5), } - masked := map[string]pdata.Value{ - "name": pdata.NewValueString("placeholder 4111111111111111"), + masked := map[string]pcommon.Value{ + "name": pcommon.NewValueString("placeholder 4111111111111111"), } - redacted := map[string]pdata.Value{ - "credit_card": pdata.NewValueString("4111111111111111"), + redacted := map[string]pcommon.Value{ + "credit_card": pcommon.NewValueString("4111111111111111"), } _, _, next := runTest(t, allowed, redacted, masked, config) @@ -184,14 +185,14 @@ func TestRedactSummarySilent(t *testing.T) { config := &Config{AllowedKeys: 
[]string{"id", "name", "group"}, BlockedValues: []string{"4[0-9]{12}(?:[0-9]{3})?"}, Summary: "silent"} - allowed := map[string]pdata.Value{ - "id": pdata.NewValueInt(5), + allowed := map[string]pcommon.Value{ + "id": pcommon.NewValueInt(5), } - masked := map[string]pdata.Value{ - "name": pdata.NewValueString("placeholder 4111111111111111"), + masked := map[string]pcommon.Value{ + "name": pcommon.NewValueString("placeholder 4111111111111111"), } - redacted := map[string]pdata.Value{ - "credit_card": pdata.NewValueString("4111111111111111"), + redacted := map[string]pcommon.Value{ + "credit_card": pcommon.NewValueString("4111111111111111"), } _, _, next := runTest(t, allowed, redacted, masked, config) @@ -218,11 +219,11 @@ func TestRedactSummarySilent(t *testing.T) { // summary attributes by default func TestRedactSummaryDefault(t *testing.T) { config := &Config{AllowedKeys: []string{"id", "name", "group"}} - allowed := map[string]pdata.Value{ - "id": pdata.NewValueInt(5), + allowed := map[string]pcommon.Value{ + "id": pcommon.NewValueInt(5), } - masked := map[string]pdata.Value{ - "name": pdata.NewValueString("placeholder 4111111111111111"), + masked := map[string]pcommon.Value{ + "name": pcommon.NewValueString("placeholder 4111111111111111"), } _, _, next := runTest(t, allowed, nil, masked, config) @@ -245,15 +246,15 @@ func TestMultipleBlockValues(t *testing.T) { config := &Config{AllowedKeys: []string{"id", "name", "mystery"}, BlockedValues: []string{"4[0-9]{12}(?:[0-9]{3})?", "(5[1-5][0-9]{3})"}, Summary: "debug"} - allowed := map[string]pdata.Value{ - "id": pdata.NewValueInt(5), - "mystery": pdata.NewValueString("mystery 52000"), + allowed := map[string]pcommon.Value{ + "id": pcommon.NewValueInt(5), + "mystery": pcommon.NewValueString("mystery 52000"), } - masked := map[string]pdata.Value{ - "name": pdata.NewValueString("placeholder 4111111111111111"), + masked := map[string]pcommon.Value{ + "name": pcommon.NewValueString("placeholder 4111111111111111"), } - redacted := map[string]pdata.Value{ - "credit_card": pdata.NewValueString("4111111111111111"), + redacted := map[string]pcommon.Value{ + "credit_card": pcommon.NewValueString("4111111111111111"), } _, _, next := runTest(t, allowed, redacted, masked, config) @@ -278,7 +279,7 @@ func TestMultipleBlockValues(t *testing.T) { assert.True(t, ok) sort.Strings(blockedKeys) assert.Equal(t, strings.Join(blockedKeys, ","), maskedValues.StringVal()) - maskedValues.Equal(pdata.NewValueString(strings.Join(blockedKeys, ","))) + maskedValues.Equal(pcommon.NewValueString(strings.Join(blockedKeys, ","))) maskedValueCount, ok := attr.Get(maskedValueCount) assert.True(t, ok) assert.Equal(t, int64(len(blockedKeys)), maskedValueCount.IntVal()) @@ -291,12 +292,12 @@ func TestMultipleBlockValues(t *testing.T) { // runTest transforms the test input data and passes it through the processor func runTest( t *testing.T, - allowed map[string]pdata.Value, - redacted map[string]pdata.Value, - masked map[string]pdata.Value, + allowed map[string]pcommon.Value, + redacted map[string]pcommon.Value, + masked map[string]pcommon.Value, config *Config, -) (pdata.InstrumentationScope, pdata.Span, *consumertest.TracesSink) { - inBatch := pdata.NewTraces() +) (pcommon.InstrumentationScope, ptrace.Span, *consumertest.TracesSink) { + inBatch := ptrace.NewTraces() rs := inBatch.ResourceSpans().AppendEmpty() ils := rs.ScopeSpans().AppendEmpty() @@ -304,7 +305,7 @@ func runTest( library.SetName("first-library") span := ils.Spans().AppendEmpty() 
span.SetName("first-batch-first-span") - span.SetTraceID(pdata.NewTraceID([16]byte{1, 2, 3, 4})) + span.SetTraceID(pcommon.NewTraceID([16]byte{1, 2, 3, 4})) length := len(allowed) + len(masked) + len(redacted) for k, v := range allowed { @@ -343,16 +344,16 @@ func BenchmarkRedactSummaryDebug(b *testing.B) { BlockedValues: []string{"4[0-9]{12}(?:[0-9]{3})?"}, Summary: "debug", } - allowed := map[string]pdata.Value{ - "id": pdata.NewValueInt(5), - "group.id": pdata.NewValueString("some.valid.id"), - "member (id)": pdata.NewValueString("some other valid id"), + allowed := map[string]pcommon.Value{ + "id": pcommon.NewValueInt(5), + "group.id": pcommon.NewValueString("some.valid.id"), + "member (id)": pcommon.NewValueString("some other valid id"), } - masked := map[string]pdata.Value{ - "name": pdata.NewValueString("placeholder 4111111111111111"), + masked := map[string]pcommon.Value{ + "name": pcommon.NewValueString("placeholder 4111111111111111"), } - redacted := map[string]pdata.Value{ - "credit_card": pdata.NewValueString("would be nice"), + redacted := map[string]pcommon.Value{ + "credit_card": pcommon.NewValueString("would be nice"), } ctx := context.Background() next := new(consumertest.TracesSink) @@ -372,14 +373,14 @@ func BenchmarkMaskSummaryDebug(b *testing.B) { BlockedValues: []string{"4[0-9]{12}(?:[0-9]{3})?", "(http|https|ftp):[\\/]{2}([a-zA-Z0-9\\-\\.]+\\.[a-zA-Z]{2,4})(:[0-9]+)?\\/?([a-zA-Z0-9\\-\\._\\?\\,\\'\\/\\\\\\+&%\\$#\\=~]*)"}, Summary: "debug", } - allowed := map[string]pdata.Value{ - "id": pdata.NewValueInt(5), - "group.id": pdata.NewValueString("some.valid.id"), - "member (id)": pdata.NewValueString("some other valid id"), + allowed := map[string]pcommon.Value{ + "id": pcommon.NewValueInt(5), + "group.id": pcommon.NewValueString("some.valid.id"), + "member (id)": pcommon.NewValueString("some other valid id"), } - masked := map[string]pdata.Value{ - "name": pdata.NewValueString("placeholder 4111111111111111"), - "url": pdata.NewValueString("https://www.this_is_testing_url.com"), + masked := map[string]pcommon.Value{ + "name": pcommon.NewValueString("placeholder 4111111111111111"), + "url": pcommon.NewValueString("https://www.this_is_testing_url.com"), } ctx := context.Background() next := new(consumertest.TracesSink) @@ -392,12 +393,12 @@ func BenchmarkMaskSummaryDebug(b *testing.B) { // runBenchmark transform benchmark input and runs it through the processor func runBenchmark( - allowed map[string]pdata.Value, - redacted map[string]pdata.Value, - masked map[string]pdata.Value, + allowed map[string]pcommon.Value, + redacted map[string]pcommon.Value, + masked map[string]pcommon.Value, processor *redaction, ) { - inBatch := pdata.NewTraces() + inBatch := ptrace.NewTraces() rs := inBatch.ResourceSpans().AppendEmpty() ils := rs.ScopeSpans().AppendEmpty() @@ -405,7 +406,7 @@ func runBenchmark( library.SetName("first-library") span := ils.Spans().AppendEmpty() span.SetName("first-batch-first-span") - span.SetTraceID(pdata.NewTraceID([16]byte{1, 2, 3, 4})) + span.SetTraceID(pcommon.NewTraceID([16]byte{1, 2, 3, 4})) for k, v := range allowed { span.Attributes().Upsert(k, v) diff --git a/processor/resourcedetectionprocessor/go.mod b/processor/resourcedetectionprocessor/go.mod index a61d2300864f..e622a36af420 100644 --- a/processor/resourcedetectionprocessor/go.mod +++ b/processor/resourcedetectionprocessor/go.mod @@ -12,8 +12,9 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.48.0 
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus v0.48.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/multierr v1.8.0 go.uber.org/zap v1.21.0 k8s.io/apimachinery v0.23.5 @@ -48,7 +49,7 @@ require ( github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/compress v1.15.1 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/mattn/go-colorable v0.1.12 // indirect github.com/mattn/go-isatty v0.0.14 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect @@ -67,7 +68,6 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rs/cors v1.8.2 // indirect github.com/sirupsen/logrus v1.8.1 // indirect - github.com/spf13/cast v1.4.1 // indirect github.com/stretchr/objx v0.2.0 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 // indirect @@ -75,7 +75,7 @@ require ( go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect - golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect golang.org/x/sys v0.0.0-20220209214540-3681064d5158 // indirect golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect @@ -104,3 +104,5 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/commo replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal => ../../internal/coreinternal replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil => ../../internal/aws/ecsutil + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/processor/resourcedetectionprocessor/go.sum b/processor/resourcedetectionprocessor/go.sum index 91eddd9538e5..8da42a27c4fb 100644 --- a/processor/resourcedetectionprocessor/go.sum +++ b/processor/resourcedetectionprocessor/go.sum @@ -141,7 +141,7 @@ github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8n github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.3.0 h1:t/LhUZLVitR1Ow2YOnduCsavhwFUklBMoGVYUCqmCqk= 
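[Editor's note] Because the pdata module is consumed through a replace directive rather than a tagged version, every affected go.mod in this change uses the same two-step wiring: a require on a placeholder version plus a replace pinning the module to the collector pseudo-version (the all-zero timestamp in the require line is what the go tool records when the version is supplied entirely by the replace). An abridged sketch, trimmed to the relevant lines of the resourcedetectionprocessor module:

```
module github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor

go 1.17

require (
	go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d
	go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d
	go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000
)

// Pin the untagged pdata module to the same snapshot as the core collector.
replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d
```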
github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -577,8 +577,8 @@ github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYs github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -796,8 +796,6 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasO github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= @@ -871,10 +869,12 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod 
h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0/go.mod h1:PFmBsWbldL1kiWZk9+0LBZz2brhByaGsvp6pRICMlPE= @@ -1005,8 +1005,9 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= diff --git a/processor/resourcedetectionprocessor/internal/aws/ec2/ec2.go b/processor/resourcedetectionprocessor/internal/aws/ec2/ec2.go index 8ac9d6811b71..087f3e04069b 100644 --- a/processor/resourcedetectionprocessor/internal/aws/ec2/ec2.go +++ b/processor/resourcedetectionprocessor/internal/aws/ec2/ec2.go @@ -25,8 +25,8 @@ import ( "github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/ec2/ec2iface" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal" @@ -63,8 +63,8 @@ func NewDetector(set component.ProcessorCreateSettings, dcfg internal.DetectorCo }, nil } -func (d *Detector) Detect(ctx context.Context) (resource pdata.Resource, schemaURL string, err error) { - res := pdata.NewResource() +func (d *Detector) Detect(ctx context.Context) (resource pcommon.Resource, schemaURL string, err error) { + res := pcommon.NewResource() if _, err = d.metadataProvider.instanceID(ctx); err != nil { d.logger.Debug("EC2 metadata unavailable", zap.Error(err)) return res, "", nil diff --git a/processor/resourcedetectionprocessor/internal/aws/ec2/ec2_test.go b/processor/resourcedetectionprocessor/internal/aws/ec2/ec2_test.go index 3f8416c013e9..d60856bd543c 100644 --- a/processor/resourcedetectionprocessor/internal/aws/ec2/ec2_test.go +++ b/processor/resourcedetectionprocessor/internal/aws/ec2/ec2_test.go @@ -26,7 +26,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component/componenttest" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" "go.uber.org/zap" 
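[Editor's note] The detector changes in this and the following files are purely a signature migration: Detect now returns a pcommon.Resource instead of a pdata.Resource, with everything else unchanged. A minimal sketch of a detector written against the new interface; the staticDetector type and its hostname field are hypothetical, only the signature, the pcommon constructors, and the semconv constants are taken from this change.

```go
package example

import (
	"context"

	conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1"
	"go.opentelemetry.io/collector/pdata/pcommon"
)

// staticDetector satisfies the internal.Detector interface shape used by the
// resourcedetection processor after the migration.
type staticDetector struct {
	hostname string
}

func (d *staticDetector) Detect(_ context.Context) (pcommon.Resource, string, error) {
	res := pcommon.NewResource()
	attrs := res.Attributes()
	attrs.InsertString(conventions.AttributeHostName, d.hostname)
	return res, conventions.SchemaURL, nil
}
```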
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal" @@ -118,7 +118,7 @@ func TestDetector_Detect(t *testing.T) { name string fields fields args args - want pdata.Resource + want pcommon.Resource wantErr bool }{ { @@ -135,8 +135,8 @@ func TestDetector_Detect(t *testing.T) { retHostname: "example-hostname", isAvailable: true}}, args: args{ctx: context.Background()}, - want: func() pdata.Resource { - res := pdata.NewResource() + want: func() pcommon.Resource { + res := pcommon.NewResource() attr := res.Attributes() attr.InsertString("cloud.account.id", "account1234") attr.InsertString("cloud.provider", "aws") @@ -157,8 +157,8 @@ func TestDetector_Detect(t *testing.T) { isAvailable: false, }}, args: args{ctx: context.Background()}, - want: func() pdata.Resource { - return pdata.NewResource() + want: func() pcommon.Resource { + return pcommon.NewResource() }(), wantErr: false}, { @@ -169,8 +169,8 @@ func TestDetector_Detect(t *testing.T) { isAvailable: true, }}, args: args{ctx: context.Background()}, - want: func() pdata.Resource { - return pdata.NewResource() + want: func() pcommon.Resource { + return pcommon.NewResource() }(), wantErr: true}, { @@ -182,8 +182,8 @@ func TestDetector_Detect(t *testing.T) { isAvailable: true, }}, args: args{ctx: context.Background()}, - want: func() pdata.Resource { - return pdata.NewResource() + want: func() pcommon.Resource { + return pcommon.NewResource() }(), wantErr: true}, } diff --git a/processor/resourcedetectionprocessor/internal/aws/ecs/ecs.go b/processor/resourcedetectionprocessor/internal/aws/ecs/ecs.go index 5dd04dbb2329..fc9b52b384eb 100644 --- a/processor/resourcedetectionprocessor/internal/aws/ecs/ecs.go +++ b/processor/resourcedetectionprocessor/internal/aws/ecs/ecs.go @@ -21,8 +21,8 @@ import ( "strings" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil/endpoints" @@ -54,8 +54,8 @@ func NewDetector(params component.ProcessorCreateSettings, _ internal.DetectorCo // Detect records metadata retrieved from the ECS Task Metadata Endpoint (TMDE) as resource attributes // TODO(willarmiros): Replace all attribute fields and enums with values defined in "conventions" once they exist -func (d *Detector) Detect(context.Context) (resource pdata.Resource, schemaURL string, err error) { - res := pdata.NewResource() +func (d *Detector) Detect(context.Context) (resource pcommon.Resource, schemaURL string, err error) { + res := pcommon.NewResource() // don't attempt to fetch metadata if there's no provider (incompatible env) if d.provider == nil { @@ -147,11 +147,11 @@ func parseRegionAndAccount(taskARN string) (region string, account string) { // "init" containers which only run at startup then shutdown (as indicated by the "KnownStatus" attribute), // containers not using AWS Logs, and those without log group metadata to get the final lists of valid log data // See: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-metadata-endpoint-v4.html#task-metadata-endpoint-v4-response -func getValidLogData(containers []ecsutil.ContainerMetadata, self *ecsutil.ContainerMetadata, account string) [4]pdata.Value { - logGroupNames := pdata.NewValueSlice() - logGroupArns := 
pdata.NewValueSlice() - logStreamNames := pdata.NewValueSlice() - logStreamArns := pdata.NewValueSlice() +func getValidLogData(containers []ecsutil.ContainerMetadata, self *ecsutil.ContainerMetadata, account string) [4]pcommon.Value { + logGroupNames := pcommon.NewValueSlice() + logGroupArns := pcommon.NewValueSlice() + logStreamNames := pcommon.NewValueSlice() + logStreamArns := pcommon.NewValueSlice() for _, container := range containers { logData := container.LogOptions @@ -168,7 +168,7 @@ func getValidLogData(containers []ecsutil.ContainerMetadata, self *ecsutil.Conta } } - return [4]pdata.Value{logGroupNames, logGroupArns, logStreamNames, logStreamArns} + return [4]pcommon.Value{logGroupNames, logGroupArns, logStreamNames, logStreamArns} } func constructLogGroupArn(region, account, group string) string { diff --git a/processor/resourcedetectionprocessor/internal/aws/ecs/ecs_test.go b/processor/resourcedetectionprocessor/internal/aws/ecs/ecs_test.go index 66e877996f5a..0c6329641cec 100644 --- a/processor/resourcedetectionprocessor/internal/aws/ecs/ecs_test.go +++ b/processor/resourcedetectionprocessor/internal/aws/ecs/ecs_test.go @@ -21,7 +21,7 @@ import ( "github.com/stretchr/testify/assert" "go.opentelemetry.io/collector/component/componenttest" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil/endpoints" @@ -105,7 +105,7 @@ func Test_ecsDetectV4(t *testing.T) { os.Clearenv() os.Setenv(endpoints.TaskMetadataEndpointV4EnvVar, "endpoint") - want := pdata.NewResource() + want := pcommon.NewResource() attr := want.Attributes() attr.InsertString("cloud.provider", "aws") attr.InsertString("cloud.platform", "aws_ecs") @@ -122,10 +122,10 @@ func Test_ecsDetectV4(t *testing.T) { attribVals := []string{"group", "arn:aws:logs:us-east-1:123456789123:log-group:group", "stream", "arn:aws:logs:us-east-1:123456789123:log-group:group:log-stream:stream"} for i, field := range attribFields { - ava := pdata.NewValueSlice() + ava := pcommon.NewValueSlice() av := ava.SliceVal() avs := av.AppendEmpty() - pdata.NewValueString(attribVals[i]).CopyTo(avs) + pcommon.NewValueString(attribVals[i]).CopyTo(avs) attr.Insert(field, ava) } @@ -141,7 +141,7 @@ func Test_ecsDetectV3(t *testing.T) { os.Clearenv() os.Setenv(endpoints.TaskMetadataEndpointV3EnvVar, "endpoint") - want := pdata.NewResource() + want := pcommon.NewResource() attr := want.Attributes() attr.InsertString("cloud.provider", "aws") attr.InsertString("cloud.platform", "aws_ecs") diff --git a/processor/resourcedetectionprocessor/internal/aws/eks/detector.go b/processor/resourcedetectionprocessor/internal/aws/eks/detector.go index a84282ad05f6..bf1ad9de91ee 100644 --- a/processor/resourcedetectionprocessor/internal/aws/eks/detector.go +++ b/processor/resourcedetectionprocessor/internal/aws/eks/detector.go @@ -20,8 +20,8 @@ import ( "os" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" "go.uber.org/zap" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" @@ -66,8 +66,8 @@ func NewDetector(set component.ProcessorCreateSettings, _ internal.DetectorConfi } // Detect returns a Resource describing the Amazon EKS environment being run in. 
-func (detector *detector) Detect(ctx context.Context) (resource pdata.Resource, schemaURL string, err error) { - res := pdata.NewResource() +func (detector *detector) Detect(ctx context.Context) (resource pcommon.Resource, schemaURL string, err error) { + res := pcommon.NewResource() //Check if running on EKS. isEKS, err := isEKS(ctx, detector.utils) diff --git a/processor/resourcedetectionprocessor/internal/aws/elasticbeanstalk/elasticbeanstalk.go b/processor/resourcedetectionprocessor/internal/aws/elasticbeanstalk/elasticbeanstalk.go index 00aba7987136..723eca643809 100644 --- a/processor/resourcedetectionprocessor/internal/aws/elasticbeanstalk/elasticbeanstalk.go +++ b/processor/resourcedetectionprocessor/internal/aws/elasticbeanstalk/elasticbeanstalk.go @@ -21,8 +21,8 @@ import ( "strconv" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal" ) @@ -51,8 +51,8 @@ func NewDetector(component.ProcessorCreateSettings, internal.DetectorConfig) (in return &Detector{fs: &ebFileSystem{}}, nil } -func (d Detector) Detect(context.Context) (resource pdata.Resource, schemaURL string, err error) { - res := pdata.NewResource() +func (d Detector) Detect(context.Context) (resource pcommon.Resource, schemaURL string, err error) { + res := pcommon.NewResource() var conf io.ReadCloser if d.fs.IsWindows() { diff --git a/processor/resourcedetectionprocessor/internal/aws/elasticbeanstalk/elasticbeanstalk_test.go b/processor/resourcedetectionprocessor/internal/aws/elasticbeanstalk/elasticbeanstalk_test.go index 9bb819f5f4d6..913ee567ea2c 100644 --- a/processor/resourcedetectionprocessor/internal/aws/elasticbeanstalk/elasticbeanstalk_test.go +++ b/processor/resourcedetectionprocessor/internal/aws/elasticbeanstalk/elasticbeanstalk_test.go @@ -24,7 +24,7 @@ import ( "github.com/stretchr/testify/assert" "go.opentelemetry.io/collector/component/componenttest" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal" ) @@ -95,7 +95,7 @@ func Test_AttributesDetectedSuccessfully(t *testing.T) { mfs := &mockFileSystem{exists: true, contents: xrayConf} d := Detector{fs: mfs} - want := pdata.NewResource() + want := pcommon.NewResource() attr := want.Attributes() attr.InsertString("cloud.provider", "aws") attr.InsertString("cloud.platform", "aws_elastic_beanstalk") diff --git a/processor/resourcedetectionprocessor/internal/azure/aks/aks.go b/processor/resourcedetectionprocessor/internal/azure/aks/aks.go index fd8d8a2b5658..2344385d8bbf 100644 --- a/processor/resourcedetectionprocessor/internal/azure/aks/aks.go +++ b/processor/resourcedetectionprocessor/internal/azure/aks/aks.go @@ -19,8 +19,8 @@ import ( "os" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal/azure" @@ -43,8 +43,8 @@ func NewDetector(component.ProcessorCreateSettings, 
internal.DetectorConfig) (in return &Detector{provider: azure.NewProvider()}, nil } -func (d *Detector) Detect(ctx context.Context) (resource pdata.Resource, schemaURL string, err error) { - res := pdata.NewResource() +func (d *Detector) Detect(ctx context.Context) (resource pcommon.Resource, schemaURL string, err error) { + res := pcommon.NewResource() if !onK8s() { return res, "", nil diff --git a/processor/resourcedetectionprocessor/internal/azure/azure.go b/processor/resourcedetectionprocessor/internal/azure/azure.go index ed238b64f9c5..a3cd7813f57b 100644 --- a/processor/resourcedetectionprocessor/internal/azure/azure.go +++ b/processor/resourcedetectionprocessor/internal/azure/azure.go @@ -18,8 +18,8 @@ import ( "context" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal" @@ -47,8 +47,8 @@ func NewDetector(p component.ProcessorCreateSettings, cfg internal.DetectorConfi } // Detect detects system metadata and returns a resource with the available ones -func (d *Detector) Detect(ctx context.Context) (resource pdata.Resource, schemaURL string, err error) { - res := pdata.NewResource() +func (d *Detector) Detect(ctx context.Context) (resource pcommon.Resource, schemaURL string, err error) { + res := pcommon.NewResource() attrs := res.Attributes() compute, err := d.provider.Metadata(ctx) diff --git a/processor/resourcedetectionprocessor/internal/consul/consul.go b/processor/resourcedetectionprocessor/internal/consul/consul.go index b5fe44be8e44..a096eb9225b6 100644 --- a/processor/resourcedetectionprocessor/internal/consul/consul.go +++ b/processor/resourcedetectionprocessor/internal/consul/consul.go @@ -20,8 +20,8 @@ import ( "github.com/hashicorp/consul/api" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal" @@ -71,8 +71,8 @@ func NewDetector(p component.ProcessorCreateSettings, dcfg internal.DetectorConf } // Detect detects system metadata and returns a resource with the available ones -func (d *Detector) Detect(ctx context.Context) (resource pdata.Resource, schemaURL string, err error) { - res := pdata.NewResource() +func (d *Detector) Detect(ctx context.Context) (resource pcommon.Resource, schemaURL string, err error) { + res := pcommon.NewResource() attrs := res.Attributes() metadata, err := d.provider.Metadata(ctx) diff --git a/processor/resourcedetectionprocessor/internal/docker/docker.go b/processor/resourcedetectionprocessor/internal/docker/docker.go index 94388c7ca769..ff1aad5eff76 100644 --- a/processor/resourcedetectionprocessor/internal/docker/docker.go +++ b/processor/resourcedetectionprocessor/internal/docker/docker.go @@ -19,8 +19,8 @@ import ( "fmt" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal" @@ -50,8 +50,8 @@ func 
NewDetector(p component.ProcessorCreateSettings, dcfg internal.DetectorConf } // Detect detects system metadata and returns a resource with the available ones -func (d *Detector) Detect(ctx context.Context) (resource pdata.Resource, schemaURL string, err error) { - res := pdata.NewResource() +func (d *Detector) Detect(ctx context.Context) (resource pcommon.Resource, schemaURL string, err error) { + res := pcommon.NewResource() attrs := res.Attributes() osType, err := d.provider.OSType(ctx) diff --git a/processor/resourcedetectionprocessor/internal/env/env.go b/processor/resourcedetectionprocessor/internal/env/env.go index 51925562cb71..6fd99cdccf77 100644 --- a/processor/resourcedetectionprocessor/internal/env/env.go +++ b/processor/resourcedetectionprocessor/internal/env/env.go @@ -27,7 +27,7 @@ import ( "strings" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal" ) @@ -52,8 +52,8 @@ func NewDetector(component.ProcessorCreateSettings, internal.DetectorConfig) (in return &Detector{}, nil } -func (d *Detector) Detect(context.Context) (resource pdata.Resource, schemaURL string, err error) { - res := pdata.NewResource() +func (d *Detector) Detect(context.Context) (resource pcommon.Resource, schemaURL string, err error) { + res := pcommon.NewResource() labels := strings.TrimSpace(os.Getenv(envVar)) if labels == "" { @@ -76,7 +76,7 @@ func (d *Detector) Detect(context.Context) (resource pdata.Resource, schemaURL s // string. Captures the trimmed key & value parts, and ignores any superfluous spaces. var labelRegex = regexp.MustCompile(`\s*([[:ascii:]]{1,256}?)\s*=\s*([[:ascii:]]{0,256}?)\s*(?:,|$)`) -func initializeAttributeMap(am pdata.Map, s string) error { +func initializeAttributeMap(am pcommon.Map, s string) error { matches := labelRegex.FindAllStringSubmatchIndex(s, -1) for len(matches) == 0 { diff --git a/processor/resourcedetectionprocessor/internal/env/env_test.go b/processor/resourcedetectionprocessor/internal/env/env_test.go index 86eb90d93b51..e9d2642a50df 100644 --- a/processor/resourcedetectionprocessor/internal/env/env_test.go +++ b/processor/resourcedetectionprocessor/internal/env/env_test.go @@ -22,7 +22,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component/componenttest" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal" ) @@ -78,7 +78,7 @@ func TestInitializeAttributeMap(t *testing.T) { cases := []struct { name string encoded string - expectedAttributes pdata.Map + expectedAttributes pcommon.Map expectedError string }{ { @@ -114,7 +114,7 @@ func TestInitializeAttributeMap(t *testing.T) { for _, c := range cases { t.Run(c.name, func(t *testing.T) { - am := pdata.NewMap() + am := pcommon.NewMap() err := initializeAttributeMap(am, c.encoded) if c.expectedError != "" { diff --git a/processor/resourcedetectionprocessor/internal/gcp/gce/gce.go b/processor/resourcedetectionprocessor/internal/gcp/gce/gce.go index e749ed7715d8..f4b2448a39dc 100644 --- a/processor/resourcedetectionprocessor/internal/gcp/gce/gce.go +++ b/processor/resourcedetectionprocessor/internal/gcp/gce/gce.go @@ -20,8 +20,8 @@ import ( "context" 
"go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" "go.uber.org/multierr" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal" @@ -41,8 +41,8 @@ func NewDetector(component.ProcessorCreateSettings, internal.DetectorConfig) (in return &Detector{metadata: &gcp.MetadataImpl{}}, nil } -func (d *Detector) Detect(context.Context) (resource pdata.Resource, schemaURL string, err error) { - res := pdata.NewResource() +func (d *Detector) Detect(context.Context) (resource pcommon.Resource, schemaURL string, err error) { + res := pcommon.NewResource() if !d.metadata.OnGCE() { return res, "", nil @@ -54,7 +54,7 @@ func (d *Detector) Detect(context.Context) (resource pdata.Resource, schemaURL s return res, conventions.SchemaURL, multierr.Append(cloudErr, hostErr) } -func (d *Detector) initializeCloudAttributes(attr pdata.Map) []error { +func (d *Detector) initializeCloudAttributes(attr pcommon.Map) []error { attr.InsertString(conventions.AttributeCloudProvider, conventions.AttributeCloudProviderGCP) attr.InsertString(conventions.AttributeCloudPlatform, conventions.AttributeCloudPlatformGCPComputeEngine) @@ -77,7 +77,7 @@ func (d *Detector) initializeCloudAttributes(attr pdata.Map) []error { return errors } -func (d *Detector) initializeHostAttributes(attr pdata.Map) []error { +func (d *Detector) initializeHostAttributes(attr pcommon.Map) []error { var errors []error hostname, err := d.metadata.Hostname() diff --git a/processor/resourcedetectionprocessor/internal/gcp/gke/gke.go b/processor/resourcedetectionprocessor/internal/gcp/gke/gke.go index 72c61d71578c..fa6c09252a4a 100644 --- a/processor/resourcedetectionprocessor/internal/gcp/gke/gke.go +++ b/processor/resourcedetectionprocessor/internal/gcp/gke/gke.go @@ -19,8 +19,8 @@ import ( "os" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal" @@ -50,8 +50,8 @@ func NewDetector(params component.ProcessorCreateSettings, _ internal.DetectorCo } // Detect detects associated resources when running in GKE environment. -func (gke *Detector) Detect(ctx context.Context) (resource pdata.Resource, schemaURL string, err error) { - res := pdata.NewResource() +func (gke *Detector) Detect(ctx context.Context) (resource pcommon.Resource, schemaURL string, err error) { + res := pcommon.NewResource() // Check if on GCP. 
if !gke.metadata.OnGCE() { diff --git a/processor/resourcedetectionprocessor/internal/resourcedetection.go b/processor/resourcedetectionprocessor/internal/resourcedetection.go index 15f394bdfbda..c0c18373ec8d 100644 --- a/processor/resourcedetectionprocessor/internal/resourcedetection.go +++ b/processor/resourcedetectionprocessor/internal/resourcedetection.go @@ -24,14 +24,14 @@ import ( "time" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" "go.uber.org/zap" ) type DetectorType string type Detector interface { - Detect(ctx context.Context) (resource pdata.Resource, schemaURL string, err error) + Detect(ctx context.Context) (resource pcommon.Resource, schemaURL string, err error) } type DetectorConfig interface{} @@ -102,7 +102,7 @@ type ResourceProvider struct { } type resourceResult struct { - resource pdata.Resource + resource pcommon.Resource schemaURL string err error } @@ -116,7 +116,7 @@ func NewResourceProvider(logger *zap.Logger, timeout time.Duration, attributesTo } } -func (p *ResourceProvider) Get(ctx context.Context, client *http.Client) (resource pdata.Resource, schemaURL string, err error) { +func (p *ResourceProvider) Get(ctx context.Context, client *http.Client) (resource pcommon.Resource, schemaURL string, err error) { p.once.Do(func() { var cancel context.CancelFunc ctx, cancel = context.WithTimeout(ctx, client.Timeout) @@ -130,7 +130,7 @@ func (p *ResourceProvider) Get(ctx context.Context, client *http.Client) (resour func (p *ResourceProvider) detectResource(ctx context.Context) { p.detectedResource = &resourceResult{} - res := pdata.NewResource() + res := pcommon.NewResource() mergedSchemaURL := "" p.logger.Info("began detecting resource information") @@ -156,35 +156,35 @@ func (p *ResourceProvider) detectResource(ctx context.Context) { p.detectedResource.schemaURL = mergedSchemaURL } -func AttributesToMap(am pdata.Map) map[string]interface{} { +func AttributesToMap(am pcommon.Map) map[string]interface{} { mp := make(map[string]interface{}, am.Len()) - am.Range(func(k string, v pdata.Value) bool { + am.Range(func(k string, v pcommon.Value) bool { mp[k] = UnwrapAttribute(v) return true }) return mp } -func UnwrapAttribute(v pdata.Value) interface{} { +func UnwrapAttribute(v pcommon.Value) interface{} { switch v.Type() { - case pdata.ValueTypeBool: + case pcommon.ValueTypeBool: return v.BoolVal() - case pdata.ValueTypeInt: + case pcommon.ValueTypeInt: return v.IntVal() - case pdata.ValueTypeDouble: + case pcommon.ValueTypeDouble: return v.DoubleVal() - case pdata.ValueTypeString: + case pcommon.ValueTypeString: return v.StringVal() - case pdata.ValueTypeSlice: + case pcommon.ValueTypeSlice: return getSerializableArray(v.SliceVal()) - case pdata.ValueTypeMap: + case pcommon.ValueTypeMap: return AttributesToMap(v.MapVal()) default: return nil } } -func getSerializableArray(inArr pdata.Slice) []interface{} { +func getSerializableArray(inArr pcommon.Slice) []interface{} { var outArr []interface{} for i := 0; i < inArr.Len(); i++ { outArr = append(outArr, UnwrapAttribute(inArr.At(i))) @@ -208,10 +208,10 @@ func MergeSchemaURL(currentSchemaURL string, newSchemaURL string) string { return currentSchemaURL } -func filterAttributes(am pdata.Map, attributesToKeep map[string]struct{}) []string { +func filterAttributes(am pcommon.Map, attributesToKeep map[string]struct{}) []string { if len(attributesToKeep) > 0 { droppedAttributes := make([]string, 0) - am.RemoveIf(func(k string, v pdata.Value) 
bool { + am.RemoveIf(func(k string, v pcommon.Value) bool { _, keep := attributesToKeep[k] if !keep { droppedAttributes = append(droppedAttributes, k) @@ -223,13 +223,13 @@ func filterAttributes(am pdata.Map, attributesToKeep map[string]struct{}) []stri return nil } -func MergeResource(to, from pdata.Resource, overrideTo bool) { +func MergeResource(to, from pcommon.Resource, overrideTo bool) { if IsEmptyResource(from) { return } toAttr := to.Attributes() - from.Attributes().Range(func(k string, v pdata.Value) bool { + from.Attributes().Range(func(k string, v pcommon.Value) bool { if overrideTo { toAttr.Upsert(k, v) } else { @@ -239,7 +239,7 @@ func MergeResource(to, from pdata.Resource, overrideTo bool) { }) } -func IsEmptyResource(res pdata.Resource) bool { +func IsEmptyResource(res pcommon.Resource) bool { return res.Attributes().Len() == 0 } diff --git a/processor/resourcedetectionprocessor/internal/resourcedetection_test.go b/processor/resourcedetectionprocessor/internal/resourcedetection_test.go index 4443f9b00c8a..ebc3c3ab1c9f 100644 --- a/processor/resourcedetectionprocessor/internal/resourcedetection_test.go +++ b/processor/resourcedetectionprocessor/internal/resourcedetection_test.go @@ -28,7 +28,7 @@ import ( "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/component/componenttest" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" "go.uber.org/zap" ) @@ -36,9 +36,9 @@ type MockDetector struct { mock.Mock } -func (p *MockDetector) Detect(ctx context.Context) (pdata.Resource, string, error) { +func (p *MockDetector) Detect(ctx context.Context) (pcommon.Resource, string, error) { args := p.Called() - return args.Get(0).(pdata.Resource), "", args.Error(1) + return args.Get(0).(pcommon.Resource), "", args.Error(1) } type mockDetectorConfig struct{} @@ -50,13 +50,13 @@ func (d *mockDetectorConfig) GetConfigFromType(detectorType DetectorType) Detect func TestDetect(t *testing.T) { tests := []struct { name string - detectedResources []pdata.Resource - expectedResource pdata.Resource + detectedResources []pcommon.Resource + expectedResource pcommon.Resource attributes []string }{ { name: "Detect three resources", - detectedResources: []pdata.Resource{ + detectedResources: []pcommon.Resource{ NewResource(map[string]interface{}{"a": "1", "b": "2"}), NewResource(map[string]interface{}{"a": "11", "c": "3"}), NewResource(map[string]interface{}{"a": "12", "c": "3"}), @@ -65,7 +65,7 @@ func TestDetect(t *testing.T) { attributes: nil, }, { name: "Detect empty resources", - detectedResources: []pdata.Resource{ + detectedResources: []pcommon.Resource{ NewResource(map[string]interface{}{"a": "1", "b": "2"}), NewResource(map[string]interface{}{}), NewResource(map[string]interface{}{"a": "11"}), @@ -74,7 +74,7 @@ func TestDetect(t *testing.T) { attributes: nil, }, { name: "Detect non-string resources", - detectedResources: []pdata.Resource{ + detectedResources: []pcommon.Resource{ NewResource(map[string]interface{}{"bool": true, "int": int64(2), "double": 0.5}), NewResource(map[string]interface{}{"bool": false}), NewResource(map[string]interface{}{"a": "11"}), @@ -83,7 +83,7 @@ func TestDetect(t *testing.T) { attributes: nil, }, { name: "Filter to one attribute", - detectedResources: []pdata.Resource{ + detectedResources: []pcommon.Resource{ NewResource(map[string]interface{}{"a": "1", "b": "2"}), NewResource(map[string]interface{}{"a": "11", "c": "3"}), 
NewResource(map[string]interface{}{"a": "12", "c": "3"}), @@ -146,7 +146,7 @@ func TestDetectResource_Error(t *testing.T) { md1.On("Detect").Return(NewResource(map[string]interface{}{"a": "1", "b": "2"}), nil) md2 := &MockDetector{} - md2.On("Detect").Return(pdata.NewResource(), errors.New("err1")) + md2.On("Detect").Return(pcommon.NewResource(), errors.New("err1")) p := NewResourceProvider(zap.NewNop(), time.Second, nil, md1, md2) _, _, err := p.Get(context.Background(), http.DefaultClient) @@ -156,10 +156,10 @@ func TestDetectResource_Error(t *testing.T) { func TestMergeResource(t *testing.T) { for _, tt := range []struct { name string - res1 pdata.Resource - res2 pdata.Resource + res1 pcommon.Resource + res2 pcommon.Resource overrideTo bool - expected pdata.Resource + expected pcommon.Resource }{ { name: "override non-empty resources", @@ -169,14 +169,14 @@ func TestMergeResource(t *testing.T) { expected: NewResource(map[string]interface{}{"a": "1", "b": "2", "c": "3"}), }, { name: "empty resource", - res1: pdata.NewResource(), + res1: pcommon.NewResource(), res2: NewResource(map[string]interface{}{"a": "1", "c": "3"}), overrideTo: false, expected: NewResource(map[string]interface{}{"a": "1", "c": "3"}), }, } { t.Run(tt.name, func(t *testing.T) { - out := pdata.NewResource() + out := pcommon.NewResource() tt.res1.CopyTo(out) MergeResource(out, tt.res2, tt.overrideTo) tt.expected.Attributes().Sort() @@ -195,10 +195,10 @@ func NewMockParallelDetector() *MockParallelDetector { return &MockParallelDetector{ch: make(chan struct{})} } -func (p *MockParallelDetector) Detect(ctx context.Context) (pdata.Resource, string, error) { +func (p *MockParallelDetector) Detect(ctx context.Context) (pcommon.Resource, string, error) { <-p.ch args := p.Called() - return args.Get(0).(pdata.Resource), "", args.Error(1) + return args.Get(0).(pcommon.Resource), "", args.Error(1) } // TestDetectResource_Parallel validates that Detect is only called once, even if there @@ -213,7 +213,7 @@ func TestDetectResource_Parallel(t *testing.T) { md2.On("Detect").Return(NewResource(map[string]interface{}{"a": "11", "c": "3"}), nil) md3 := NewMockParallelDetector() - md3.On("Detect").Return(pdata.NewResource(), errors.New("an error")) + md3.On("Detect").Return(pcommon.NewResource(), errors.New("an error")) expectedResource := NewResource(map[string]interface{}{"a": "1", "b": "2", "c": "3"}) expectedResource.Attributes().Sort() @@ -253,7 +253,7 @@ func TestFilterAttributes_Match(t *testing.T) { "host.name": {}, "host.id": {}, } - attr := pdata.NewMap() + attr := pcommon.NewMap() attr.InsertString("host.name", "test") attr.InsertString("host.id", "test") attr.InsertString("drop.this", "test") @@ -276,7 +276,7 @@ func TestFilterAttributes_NoMatch(t *testing.T) { m := map[string]struct{}{ "cloud.region": {}, } - attr := pdata.NewMap() + attr := pcommon.NewMap() attr.InsertString("host.name", "test") attr.InsertString("host.id", "test") @@ -293,7 +293,7 @@ func TestFilterAttributes_NoMatch(t *testing.T) { func TestFilterAttributes_NilAttributes(t *testing.T) { var m map[string]struct{} - attr := pdata.NewMap() + attr := pcommon.NewMap() attr.InsertString("host.name", "test") attr.InsertString("host.id", "test") @@ -310,7 +310,7 @@ func TestFilterAttributes_NilAttributes(t *testing.T) { func TestFilterAttributes_NoAttributes(t *testing.T) { m := make(map[string]struct{}) - attr := pdata.NewMap() + attr := pcommon.NewMap() attr.InsertString("host.name", "test") attr.InsertString("host.id", "test") @@ -339,17 +339,17 @@ func 
TestAttributesToMap(t *testing.T) { int64(42), }, } - attr := pdata.NewMap() + attr := pcommon.NewMap() attr.InsertString("str", "a") attr.InsertInt("int", 5) attr.InsertDouble("double", 5.0) attr.InsertBool("bool", true) - avm := pdata.NewValueMap() + avm := pcommon.NewValueMap() innerAttr := avm.MapVal() innerAttr.InsertString("inner", "val") attr.Insert("map", avm) - ava := pdata.NewValueSlice() + ava := pcommon.NewValueSlice() arrayAttr := ava.SliceVal() arrayAttr.EnsureCapacity(2) arrayAttr.AppendEmpty().SetStringVal("inner") diff --git a/processor/resourcedetectionprocessor/internal/system/system.go b/processor/resourcedetectionprocessor/internal/system/system.go index b94378eb29d0..c273f437efff 100644 --- a/processor/resourcedetectionprocessor/internal/system/system.go +++ b/processor/resourcedetectionprocessor/internal/system/system.go @@ -20,8 +20,8 @@ import ( "fmt" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal" @@ -56,10 +56,10 @@ func NewDetector(p component.ProcessorCreateSettings, dcfg internal.DetectorConf } // Detect detects system metadata and returns a resource with the available ones -func (d *Detector) Detect(_ context.Context) (resource pdata.Resource, schemaURL string, err error) { +func (d *Detector) Detect(_ context.Context) (resource pcommon.Resource, schemaURL string, err error) { var hostname string - res := pdata.NewResource() + res := pcommon.NewResource() attrs := res.Attributes() osType, err := d.provider.OSType() diff --git a/processor/resourcedetectionprocessor/internal/testutils.go b/processor/resourcedetectionprocessor/internal/testutils.go index 9026fc6dd387..00369b91ea74 100644 --- a/processor/resourcedetectionprocessor/internal/testutils.go +++ b/processor/resourcedetectionprocessor/internal/testutils.go @@ -14,34 +14,34 @@ package internal // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal" -import "go.opentelemetry.io/collector/model/pdata" +import "go.opentelemetry.io/collector/pdata/pcommon" -func NewResource(mp map[string]interface{}) pdata.Resource { - res := pdata.NewResource() +func NewResource(mp map[string]interface{}) pcommon.Resource { + res := pcommon.NewResource() attr := res.Attributes() fillAttributeMap(mp, attr) return res } -func NewAttributeMap(mp map[string]interface{}) pdata.Map { - attr := pdata.NewMap() +func NewAttributeMap(mp map[string]interface{}) pcommon.Map { + attr := pcommon.NewMap() fillAttributeMap(mp, attr) return attr } -func fillAttributeMap(mp map[string]interface{}, attr pdata.Map) { +func fillAttributeMap(mp map[string]interface{}, attr pcommon.Map) { attr.Clear() attr.EnsureCapacity(len(mp)) for k, v := range mp { switch t := v.(type) { case bool: - attr.Insert(k, pdata.NewValueBool(t)) + attr.Insert(k, pcommon.NewValueBool(t)) case int64: - attr.Insert(k, pdata.NewValueInt(t)) + attr.Insert(k, pcommon.NewValueInt(t)) case float64: - attr.Insert(k, pdata.NewValueDouble(t)) + attr.Insert(k, pcommon.NewValueDouble(t)) case string: - attr.Insert(k, pdata.NewValueString(t)) + attr.Insert(k, pcommon.NewValueString(t)) } } } diff --git a/processor/resourcedetectionprocessor/resourcedetection_processor.go 
b/processor/resourcedetectionprocessor/resourcedetection_processor.go index e71d44b6a5bd..91a3cf04f85e 100644 --- a/processor/resourcedetectionprocessor/resourcedetection_processor.go +++ b/processor/resourcedetectionprocessor/resourcedetection_processor.go @@ -19,14 +19,17 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/confighttp" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal" ) type resourceDetectionProcessor struct { provider *internal.ResourceProvider - resource pdata.Resource + resource pcommon.Resource schemaURL string override bool httpClientSettings confighttp.HTTPClientSettings @@ -43,7 +46,7 @@ func (rdp *resourceDetectionProcessor) Start(ctx context.Context, host component } // processTraces implements the ProcessTracesFunc type. -func (rdp *resourceDetectionProcessor) processTraces(_ context.Context, td pdata.Traces) (pdata.Traces, error) { +func (rdp *resourceDetectionProcessor) processTraces(_ context.Context, td ptrace.Traces) (ptrace.Traces, error) { rs := td.ResourceSpans() for i := 0; i < rs.Len(); i++ { rss := rs.At(i) @@ -55,7 +58,7 @@ func (rdp *resourceDetectionProcessor) processTraces(_ context.Context, td pdata } // processMetrics implements the ProcessMetricsFunc type. -func (rdp *resourceDetectionProcessor) processMetrics(_ context.Context, md pdata.Metrics) (pdata.Metrics, error) { +func (rdp *resourceDetectionProcessor) processMetrics(_ context.Context, md pmetric.Metrics) (pmetric.Metrics, error) { rm := md.ResourceMetrics() for i := 0; i < rm.Len(); i++ { rss := rm.At(i) @@ -67,7 +70,7 @@ func (rdp *resourceDetectionProcessor) processMetrics(_ context.Context, md pdat } // processLogs implements the ProcessLogsFunc type. 
-func (rdp *resourceDetectionProcessor) processLogs(_ context.Context, ld pdata.Logs) (pdata.Logs, error) { +func (rdp *resourceDetectionProcessor) processLogs(_ context.Context, ld plog.Logs) (plog.Logs, error) { rl := ld.ResourceLogs() for i := 0; i < rl.Len(); i++ { rss := rl.At(i) diff --git a/processor/resourcedetectionprocessor/resourcedetection_processor_test.go b/processor/resourcedetectionprocessor/resourcedetection_processor_test.go index 850b5793638f..085820d27751 100644 --- a/processor/resourcedetectionprocessor/resourcedetection_processor_test.go +++ b/processor/resourcedetectionprocessor/resourcedetection_processor_test.go @@ -29,7 +29,10 @@ import ( "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/config/confighttp" "go.opentelemetry.io/collector/consumer/consumertest" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" internaldata "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal" @@ -41,9 +44,9 @@ type MockDetector struct { mock.Mock } -func (p *MockDetector) Detect(ctx context.Context) (resource pdata.Resource, schemaURL string, err error) { +func (p *MockDetector) Detect(ctx context.Context) (resource pcommon.Resource, schemaURL string, err error) { args := p.Called() - return args.Get(0).(pdata.Resource), "", args.Error(1) + return args.Get(0).(pcommon.Resource), "", args.Error(1) } func TestResourceProcessor(t *testing.T) { @@ -51,10 +54,10 @@ func TestResourceProcessor(t *testing.T) { name string detectorKeys []string override bool - sourceResource pdata.Resource - detectedResource pdata.Resource + sourceResource pcommon.Resource + detectedResource pcommon.Resource detectedError error - expectedResource pdata.Resource + expectedResource pcommon.Resource expectedNewError string }{ { @@ -121,20 +124,20 @@ func TestResourceProcessor(t *testing.T) { }, { name: "Source resource is nil", - sourceResource: pdata.NewResource(), + sourceResource: pcommon.NewResource(), detectedResource: internal.NewResource(map[string]interface{}{"host.name": "node"}), expectedResource: internal.NewResource(map[string]interface{}{"host.name": "node"}), }, { name: "Detected resource is nil", sourceResource: internal.NewResource(map[string]interface{}{"host.name": "node"}), - detectedResource: pdata.NewResource(), + detectedResource: pcommon.NewResource(), expectedResource: internal.NewResource(map[string]interface{}{"host.name": "node"}), }, { name: "Both resources are nil", - sourceResource: pdata.NewResource(), - detectedResource: pdata.NewResource(), + sourceResource: pcommon.NewResource(), + detectedResource: pcommon.NewResource(), expectedResource: internal.NewResource(map[string]interface{}{}), }, { @@ -197,7 +200,7 @@ func TestResourceProcessor(t *testing.T) { require.NoError(t, err) defer func() { assert.NoError(t, rtp.Shutdown(context.Background())) }() - td := pdata.NewTraces() + td := ptrace.NewTraces() tt.sourceResource.CopyTo(td.ResourceSpans().AppendEmpty().Resource()) err = rtp.ConsumeTraces(context.Background(), td) @@ -230,7 +233,7 @@ func TestResourceProcessor(t *testing.T) { require.NoError(t, err) defer func() { assert.NoError(t, rmp.Shutdown(context.Background())) }() - // TODO create pdata.Metrics directly when this is 
no longer internal + // TODO create pmetric.Metrics directly when this is no longer internal err = rmp.ConsumeMetrics(context.Background(), internaldata.OCToMetrics(nil, oCensusResource(tt.sourceResource), nil)) require.NoError(t, err) got = tmn.AllMetrics()[0].ResourceMetrics().At(0).Resource() @@ -261,7 +264,7 @@ func TestResourceProcessor(t *testing.T) { require.NoError(t, err) defer func() { assert.NoError(t, rlp.Shutdown(context.Background())) }() - ld := pdata.NewLogs() + ld := plog.NewLogs() tt.sourceResource.CopyTo(ld.ResourceLogs().AppendEmpty().Resource()) err = rlp.ConsumeLogs(context.Background(), ld) @@ -275,13 +278,13 @@ func TestResourceProcessor(t *testing.T) { } } -func oCensusResource(res pdata.Resource) *resourcepb.Resource { +func oCensusResource(res pcommon.Resource) *resourcepb.Resource { if res.Attributes().Len() == 0 { return &resourcepb.Resource{} } mp := make(map[string]string, res.Attributes().Len()) - res.Attributes().Range(func(k string, v pdata.Value) bool { + res.Attributes().Range(func(k string, v pcommon.Value) bool { mp[k] = v.StringVal() return true }) @@ -297,7 +300,7 @@ func benchmarkConsumeTraces(b *testing.B, cfg *Config) { b.ResetTimer() for n := 0; n < b.N; n++ { // TODO use testbed.PerfTestDataProvider here once that includes resources - processor.ConsumeTraces(context.Background(), pdata.NewTraces()) + processor.ConsumeTraces(context.Background(), ptrace.NewTraces()) } } @@ -319,7 +322,7 @@ func benchmarkConsumeMetrics(b *testing.B, cfg *Config) { b.ResetTimer() for n := 0; n < b.N; n++ { // TODO use testbed.PerfTestDataProvider here once that includes resources - processor.ConsumeMetrics(context.Background(), pdata.NewMetrics()) + processor.ConsumeMetrics(context.Background(), pmetric.NewMetrics()) } } @@ -341,7 +344,7 @@ func benchmarkConsumeLogs(b *testing.B, cfg *Config) { b.ResetTimer() for n := 0; n < b.N; n++ { // TODO use testbed.PerfTestDataProvider here once that includes resources - processor.ConsumeLogs(context.Background(), pdata.NewLogs()) + processor.ConsumeLogs(context.Background(), plog.NewLogs()) } } diff --git a/processor/resourceprocessor/go.mod b/processor/resourceprocessor/go.mod index 2dcb00e6d0a5..380cad4dde92 100644 --- a/processor/resourceprocessor/go.mod +++ b/processor/resourceprocessor/go.mod @@ -5,15 +5,15 @@ go 1.17 require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.48.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/zap v1.21.0 ) require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/text v0.2.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect @@ -32,3 +32,5 @@ require ( ) replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal => ../../internal/coreinternal + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/processor/resourceprocessor/go.sum b/processor/resourceprocessor/go.sum index da753b174779..937a18c5c7bf 100644 --- 
a/processor/resourceprocessor/go.sum +++ b/processor/resourceprocessor/go.sum @@ -15,7 +15,7 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= @@ -69,7 +69,6 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -99,8 +98,8 @@ github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqx github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -159,10 +158,10 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= 
+go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= @@ -202,7 +201,7 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -224,7 +223,7 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= diff --git a/processor/resourceprocessor/resource_processor.go b/processor/resourceprocessor/resource_processor.go index 58aa37794be0..62a7bed674e2 100644 --- a/processor/resourceprocessor/resource_processor.go +++ b/processor/resourceprocessor/resource_processor.go @@ -17,7 +17,9 @@ package resourceprocessor // import "github.com/open-telemetry/opentelemetry-col import ( "context" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/attraction" @@ -28,7 +30,7 @@ type resourceProcessor struct { attrProc *attraction.AttrProc } -func (rp *resourceProcessor) processTraces(ctx context.Context, td pdata.Traces) 
(pdata.Traces, error) { +func (rp *resourceProcessor) processTraces(ctx context.Context, td ptrace.Traces) (ptrace.Traces, error) { rss := td.ResourceSpans() for i := 0; i < rss.Len(); i++ { rp.attrProc.Process(ctx, rp.logger, rss.At(i).Resource().Attributes()) @@ -36,7 +38,7 @@ func (rp *resourceProcessor) processTraces(ctx context.Context, td pdata.Traces) return td, nil } -func (rp *resourceProcessor) processMetrics(ctx context.Context, md pdata.Metrics) (pdata.Metrics, error) { +func (rp *resourceProcessor) processMetrics(ctx context.Context, md pmetric.Metrics) (pmetric.Metrics, error) { rms := md.ResourceMetrics() for i := 0; i < rms.Len(); i++ { rp.attrProc.Process(ctx, rp.logger, rms.At(i).Resource().Attributes()) @@ -44,7 +46,7 @@ func (rp *resourceProcessor) processMetrics(ctx context.Context, md pdata.Metric return md, nil } -func (rp *resourceProcessor) processLogs(ctx context.Context, ld pdata.Logs) (pdata.Logs, error) { +func (rp *resourceProcessor) processLogs(ctx context.Context, ld plog.Logs) (plog.Logs, error) { rls := ld.ResourceLogs() for i := 0; i < rls.Len(); i++ { rp.attrProc.Process(ctx, rp.logger, rls.At(i).Resource().Attributes()) diff --git a/processor/resourceprocessor/resource_processor_test.go b/processor/resourceprocessor/resource_processor_test.go index 05cd3f82fcd0..1cd39f340277 100644 --- a/processor/resourceprocessor/resource_processor_test.go +++ b/processor/resourceprocessor/resource_processor_test.go @@ -23,7 +23,9 @@ import ( "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/consumer/consumertest" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/attraction" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/testdata" @@ -170,7 +172,7 @@ func TestResourceProcessorError(t *testing.T) { require.Nil(t, rlp) } -func generateTraceData(attributes map[string]string) pdata.Traces { +func generateTraceData(attributes map[string]string) ptrace.Traces { td := testdata.GenerateTracesOneSpanNoResource() if attributes == nil { return td @@ -183,7 +185,7 @@ func generateTraceData(attributes map[string]string) pdata.Traces { return td } -func generateMetricData(attributes map[string]string) pdata.Metrics { +func generateMetricData(attributes map[string]string) pmetric.Metrics { md := testdata.GenerateMetricsOneMetricNoResource() if attributes == nil { return md @@ -196,7 +198,7 @@ func generateMetricData(attributes map[string]string) pdata.Metrics { return md } -func generateLogData(attributes map[string]string) pdata.Logs { +func generateLogData(attributes map[string]string) plog.Logs { ld := testdata.GenerateLogsOneLogRecordNoResource() if attributes == nil { return ld diff --git a/processor/routingprocessor/extract.go b/processor/routingprocessor/extract.go index 37233ba2ae8e..21aeeeeb3b15 100644 --- a/processor/routingprocessor/extract.go +++ b/processor/routingprocessor/extract.go @@ -18,7 +18,7 @@ import ( "context" "strings" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" "go.uber.org/zap" "google.golang.org/grpc/metadata" ) @@ -41,7 +41,7 @@ func newExtractor(fromAttr string, logger *zap.Logger) extractor { } // extractAttrFromResource extract string value from the requested 
resource attribute. -func (e extractor) extractAttrFromResource(r pdata.Resource) string { +func (e extractor) extractAttrFromResource(r pcommon.Resource) string { firstResourceAttributes := r.Attributes() routingAttribute, found := firstResourceAttributes.Get(e.fromAttr) if !found { diff --git a/processor/routingprocessor/extract_test.go b/processor/routingprocessor/extract_test.go index d8cff409c850..18680c53f30f 100644 --- a/processor/routingprocessor/extract_test.go +++ b/processor/routingprocessor/extract_test.go @@ -19,7 +19,7 @@ import ( "testing" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap" "google.golang.org/grpc/metadata" ) @@ -28,7 +28,7 @@ func TestExtractorForTraces_FromContext(t *testing.T) { testcases := []struct { name string ctxFunc func() context.Context - tracesFunc func() pdata.Traces + tracesFunc func() ptrace.Traces fromAttr string expectedValue string }{ @@ -39,8 +39,8 @@ func TestExtractorForTraces_FromContext(t *testing.T) { metadata.Pairs("X-Tenant", "acme"), ) }, - tracesFunc: func() pdata.Traces { - return pdata.NewTraces() + tracesFunc: func() ptrace.Traces { + return ptrace.NewTraces() }, fromAttr: "X-Tenant", expectedValue: "acme", @@ -50,8 +50,8 @@ func TestExtractorForTraces_FromContext(t *testing.T) { ctxFunc: func() context.Context { return context.Background() }, - tracesFunc: func() pdata.Traces { - return pdata.NewTraces() + tracesFunc: func() ptrace.Traces { + return ptrace.NewTraces() }, fromAttr: "X-Tenant", expectedValue: "", @@ -63,8 +63,8 @@ func TestExtractorForTraces_FromContext(t *testing.T) { metadata.Pairs("X-Tenant", ""), ) }, - tracesFunc: func() pdata.Traces { - return pdata.NewTraces() + tracesFunc: func() ptrace.Traces { + return ptrace.NewTraces() }, fromAttr: "X-Tenant", expectedValue: "", @@ -76,8 +76,8 @@ func TestExtractorForTraces_FromContext(t *testing.T) { metadata.Pairs("X-Tenant", "globex", "X-Tenant", "acme"), ) }, - tracesFunc: func() pdata.Traces { - traces := pdata.NewTraces() + tracesFunc: func() ptrace.Traces { + traces := ptrace.NewTraces() traces.ResourceSpans().AppendEmpty() traces.ResourceSpans().At(0).Resource(). Attributes().InsertString("k8s.namespace.name", "namespace-1") @@ -104,7 +104,7 @@ func TestExtractorForTraces_FromResourceAttribute(t *testing.T) { testcases := []struct { name string ctxFunc func() context.Context - tracesFunc func() pdata.Traces + tracesFunc func() ptrace.Traces fromAttr string expectedValue string }{ @@ -113,8 +113,8 @@ func TestExtractorForTraces_FromResourceAttribute(t *testing.T) { ctxFunc: func() context.Context { return context.Background() }, - tracesFunc: func() pdata.Traces { - traces := pdata.NewTraces() + tracesFunc: func() ptrace.Traces { + traces := ptrace.NewTraces() rSpans := traces.ResourceSpans().AppendEmpty() rSpans.Resource().Attributes(). InsertString("k8s.namespace.name", "namespace-1") @@ -130,8 +130,8 @@ func TestExtractorForTraces_FromResourceAttribute(t *testing.T) { metadata.Pairs("k8s.namespace.name", "namespace-1-from-context"), ) }, - tracesFunc: func() pdata.Traces { - traces := pdata.NewTraces() + tracesFunc: func() ptrace.Traces { + traces := ptrace.NewTraces() rSpans := traces.ResourceSpans().AppendEmpty() rSpans.Resource().Attributes(). 
InsertString("k8s.namespace.name", "namespace-1") diff --git a/processor/routingprocessor/factory_test.go b/processor/routingprocessor/factory_test.go index 6e4593714212..be3cb86f1d3a 100644 --- a/processor/routingprocessor/factory_test.go +++ b/processor/routingprocessor/factory_test.go @@ -27,7 +27,7 @@ import ( "go.opentelemetry.io/collector/config/configgrpc" "go.opentelemetry.io/collector/consumer/consumertest" "go.opentelemetry.io/collector/exporter/otlpexporter" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/ptrace" "go.opentelemetry.io/collector/processor/processorhelper" "go.opentelemetry.io/collector/service/servicetest" "go.uber.org/zap" @@ -221,6 +221,6 @@ func TestShutdown(t *testing.T) { type mockProcessor struct{} -func (mp *mockProcessor) processTraces(context.Context, pdata.Traces) (pdata.Traces, error) { - return pdata.NewTraces(), nil +func (mp *mockProcessor) processTraces(context.Context, ptrace.Traces) (ptrace.Traces, error) { + return ptrace.NewTraces(), nil } diff --git a/processor/routingprocessor/go.mod b/processor/routingprocessor/go.mod index 7298065be77d..b00834cdb9f0 100644 --- a/processor/routingprocessor/go.mod +++ b/processor/routingprocessor/go.mod @@ -5,8 +5,8 @@ go 1.17 require ( github.com/open-telemetry/opentelemetry-collector-contrib/exporter/jaegerexporter v0.48.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/zap v1.21.0 google.golang.org/grpc v1.45.0 ) @@ -14,7 +14,7 @@ require ( require ( cloud.google.com/go/compute v1.5.0 // indirect github.com/apache/thrift v0.16.0 // indirect - github.com/cenkalti/backoff/v4 v4.1.2 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/go-logr/logr v1.2.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect @@ -24,7 +24,7 @@ require ( github.com/golang/snappy v0.0.4 // indirect github.com/jaegertracing/jaeger v1.32.0 // indirect github.com/klauspost/compress v1.15.1 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect @@ -33,17 +33,17 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.48.0 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spf13/cast v1.4.1 // indirect github.com/uber/jaeger-client-go v2.30.0+incompatible // indirect github.com/uber/jaeger-lib v2.4.1+incompatible // indirect go.opencensus.io v0.23.0 // indirect + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect - golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // 
indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect golang.org/x/sys v0.0.0-20220209214540-3681064d5158 // indirect golang.org/x/text v0.3.7 // indirect @@ -59,3 +59,5 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/exporter/jaege replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal => ../../internal/coreinternal replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger => ../../pkg/translator/jaeger + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/processor/routingprocessor/go.sum b/processor/routingprocessor/go.sum index a4fd5758dd7c..4ae0dcf0fa73 100644 --- a/processor/routingprocessor/go.sum +++ b/processor/routingprocessor/go.sum @@ -72,8 +72,8 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= -github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -201,7 +201,6 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -241,8 +240,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/klauspost/compress v1.14.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf 
v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= @@ -299,8 +298,6 @@ github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFo github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/schollz/progressbar/v2 v2.13.2/go.mod h1:6YZjqdthH6SCZKv2rqGryrxPtfmRB/DWZxSMfCXPyD8= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -330,10 +327,12 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0/go.mod h1:SY9qHHUES6W3oZnO1H2W8NvsSovIoXRg/A1AH9px8+I= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= @@ -342,7 +341,7 @@ go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 
h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.1/go.mod h1:RkFRM1m0puWIq10oxImnGEduNBzxiN7TXluRBtE+5j0= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= @@ -433,8 +432,9 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= diff --git a/processor/routingprocessor/processor.go b/processor/routingprocessor/processor.go index c9ab7ec4217b..1c84d7a2a2b4 100644 --- a/processor/routingprocessor/processor.go +++ b/processor/routingprocessor/processor.go @@ -21,7 +21,9 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap" ) @@ -66,7 +68,7 @@ func (e *processorImp) Shutdown(context.Context) error { return nil } -func (e *processorImp) ConsumeTraces(ctx context.Context, td pdata.Traces) error { +func (e *processorImp) ConsumeTraces(ctx context.Context, td ptrace.Traces) error { routedTraces := e.router.RouteTraces(ctx, td) for _, rt := range routedTraces { for _, exp := range rt.exporters { @@ -80,7 +82,7 @@ func (e *processorImp) ConsumeTraces(ctx context.Context, td pdata.Traces) error return nil } -func (e *processorImp) ConsumeMetrics(ctx context.Context, tm pdata.Metrics) error { +func (e *processorImp) ConsumeMetrics(ctx context.Context, tm pmetric.Metrics) error { routedMetrics := e.router.RouteMetrics(ctx, tm) for _, rm := range routedMetrics { for _, exp := range rm.exporters { @@ -94,7 +96,7 @@ func (e *processorImp) ConsumeMetrics(ctx context.Context, tm pdata.Metrics) err return nil } -func (e *processorImp) ConsumeLogs(ctx context.Context, tl pdata.Logs) error { +func (e *processorImp) ConsumeLogs(ctx context.Context, tl plog.Logs) error { routedLogs := e.router.RouteLogs(ctx, tl) for _, rl := range routedLogs { for _, exp := range rl.exporters { diff --git a/processor/routingprocessor/processor_test.go b/processor/routingprocessor/processor_test.go index 470d8529ecff..ed6a4d32f182 100644 --- a/processor/routingprocessor/processor_test.go +++ b/processor/routingprocessor/processor_test.go @@ -28,7 +28,9 @@ import ( "go.opentelemetry.io/collector/config/configgrpc" 
"go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/exporter/otlpexporter" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap" "google.golang.org/grpc/metadata" ) @@ -195,7 +197,7 @@ func TestTraces_AreCorrectlySplitPerResourceAttributeRouting(t *testing.T) { }, }) - tr := pdata.NewTraces() + tr := ptrace.NewTraces() rl := tr.ResourceSpans().AppendEmpty() rl.Resource().Attributes().InsertString("X-Tenant", "acme") @@ -218,7 +220,7 @@ func TestTraces_AreCorrectlySplitPerResourceAttributeRouting(t *testing.T) { // The numbers below stem from the fact that data is routed and grouped // per resource attribute which is used for routing. - // Hence the first 2 traces are grouped together under one pdata.Logs. + // Hence the first 2 traces are grouped together under one plog.Logs. assert.Equal(t, 1, defaultExp.getTraceCount(), "one log should be routed to default exporter", ) @@ -256,7 +258,7 @@ func TestTraces_RoutingWorks_Context(t *testing.T) { }) require.NoError(t, exp.Start(context.Background(), host)) - tr := pdata.NewTraces() + tr := ptrace.NewTraces() rs := tr.ResourceSpans().AppendEmpty() rs.Resource().Attributes().InsertString("X-Tenant", "acme") @@ -321,7 +323,7 @@ func TestTraces_RoutingWorks_ResourceAttribute(t *testing.T) { require.NoError(t, exp.Start(context.Background(), host)) t.Run("non default route is properly used", func(t *testing.T) { - tr := pdata.NewTraces() + tr := ptrace.NewTraces() rs := tr.ResourceSpans().AppendEmpty() rs.Resource().Attributes().InsertString("X-Tenant", "acme") @@ -335,7 +337,7 @@ func TestTraces_RoutingWorks_ResourceAttribute(t *testing.T) { }) t.Run("default route is taken when no matching route can be found", func(t *testing.T) { - tr := pdata.NewTraces() + tr := ptrace.NewTraces() rs := tr.ResourceSpans().AppendEmpty() rs.Resource().Attributes().InsertString("X-Tenant", "some-custom-value") @@ -379,7 +381,7 @@ func TestTraces_RoutingWorks_ResourceAttribute_DropsRoutingAttribute(t *testing. 
}) require.NoError(t, exp.Start(context.Background(), host)) - tr := pdata.NewTraces() + tr := ptrace.NewTraces() rm := tr.ResourceSpans().AppendEmpty() rm.Resource().Attributes().InsertString("X-Tenant", "acme") rm.Resource().Attributes().InsertString("attr", "acme") @@ -444,24 +446,24 @@ func TestMetrics_AreCorrectlySplitPerResourceAttributeRouting(t *testing.T) { }, }) - m := pdata.NewMetrics() + m := pmetric.NewMetrics() rm := m.ResourceMetrics().AppendEmpty() rm.Resource().Attributes().InsertString("X-Tenant", "acme") metric := rm.ScopeMetrics().AppendEmpty().Metrics().AppendEmpty() - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) metric.SetName("cpu") rm = m.ResourceMetrics().AppendEmpty() rm.Resource().Attributes().InsertString("X-Tenant", "acme") metric = rm.ScopeMetrics().AppendEmpty().Metrics().AppendEmpty() - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) metric.SetName("cpu_system") rm = m.ResourceMetrics().AppendEmpty() rm.Resource().Attributes().InsertString("X-Tenant", "something-else") metric = rm.ScopeMetrics().AppendEmpty().Metrics().AppendEmpty() - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) metric.SetName("cpu_idle") ctx := context.Background() @@ -470,7 +472,7 @@ func TestMetrics_AreCorrectlySplitPerResourceAttributeRouting(t *testing.T) { // The numbers below stem from the fact that data is routed and grouped // per resource attribute which is used for routing. - // Hence the first 2 metrics are grouped together under one pdata.Metrics. + // Hence the first 2 metrics are grouped together under one pmetric.Metrics. assert.Equal(t, 1, defaultExp.getMetricCount(), "one metric should be routed to default exporter", ) @@ -508,7 +510,7 @@ func TestMetrics_RoutingWorks_Context(t *testing.T) { }) require.NoError(t, exp.Start(context.Background(), host)) - m := pdata.NewMetrics() + m := pmetric.NewMetrics() rm := m.ResourceMetrics().AppendEmpty() rm.Resource().Attributes().InsertString("X-Tenant", "acme") @@ -573,7 +575,7 @@ func TestMetrics_RoutingWorks_ResourceAttribute(t *testing.T) { require.NoError(t, exp.Start(context.Background(), host)) t.Run("non default route is properly used", func(t *testing.T) { - m := pdata.NewMetrics() + m := pmetric.NewMetrics() rm := m.ResourceMetrics().AppendEmpty() rm.Resource().Attributes().InsertString("X-Tenant", "acme") @@ -587,7 +589,7 @@ func TestMetrics_RoutingWorks_ResourceAttribute(t *testing.T) { }) t.Run("default route is taken when no matching route can be found", func(t *testing.T) { - m := pdata.NewMetrics() + m := pmetric.NewMetrics() rm := m.ResourceMetrics().AppendEmpty() rm.Resource().Attributes().InsertString("X-Tenant", "some-custom-value") @@ -631,7 +633,7 @@ func TestMetrics_RoutingWorks_ResourceAttribute_DropsRoutingAttribute(t *testing }) require.NoError(t, exp.Start(context.Background(), host)) - m := pdata.NewMetrics() + m := pmetric.NewMetrics() rm := m.ResourceMetrics().AppendEmpty() rm.Resource().Attributes().InsertString("X-Tenant", "acme") rm.Resource().Attributes().InsertString("attr", "acme") @@ -679,7 +681,7 @@ func TestLogs_RoutingWorks_Context(t *testing.T) { }) require.NoError(t, exp.Start(context.Background(), host)) - l := pdata.NewLogs() + l := plog.NewLogs() rl := l.ResourceLogs().AppendEmpty() rl.Resource().Attributes().InsertString("X-Tenant", "acme") @@ -744,7 +746,7 @@ func TestLogs_RoutingWorks_ResourceAttribute(t *testing.T) { require.NoError(t, 
exp.Start(context.Background(), host)) t.Run("non default route is properly used", func(t *testing.T) { - l := pdata.NewLogs() + l := plog.NewLogs() rl := l.ResourceLogs().AppendEmpty() rl.Resource().Attributes().InsertString("X-Tenant", "acme") @@ -758,7 +760,7 @@ func TestLogs_RoutingWorks_ResourceAttribute(t *testing.T) { }) t.Run("default route is taken when no matching route can be found", func(t *testing.T) { - l := pdata.NewLogs() + l := plog.NewLogs() rl := l.ResourceLogs().AppendEmpty() rl.Resource().Attributes().InsertString("X-Tenant", "some-custom-value") @@ -802,7 +804,7 @@ func TestLogs_RoutingWorks_ResourceAttribute_DropsRoutingAttribute(t *testing.T) }) require.NoError(t, exp.Start(context.Background(), host)) - l := pdata.NewLogs() + l := plog.NewLogs() rm := l.ResourceLogs().AppendEmpty() rm.Resource().Attributes().InsertString("X-Tenant", "acme") rm.Resource().Attributes().InsertString("attr", "acme") @@ -849,7 +851,7 @@ func TestLogs_AreCorrectlySplitPerResourceAttributeRouting(t *testing.T) { }, }) - l := pdata.NewLogs() + l := plog.NewLogs() rl := l.ResourceLogs().AppendEmpty() rl.Resource().Attributes().InsertString("X-Tenant", "acme") @@ -872,7 +874,7 @@ func TestLogs_AreCorrectlySplitPerResourceAttributeRouting(t *testing.T) { // The numbers below stem from the fact that data is routed and grouped // per resource attribute which is used for routing. - // Hence the first 2 metrics are grouped together under one pdata.Logs. + // Hence the first 2 metrics are grouped together under one plog.Logs. assert.Equal(t, 1, defaultExp.getLogCount(), "one log should be routed to default exporter", ) @@ -914,7 +916,7 @@ func Benchmark_MetricsRouting_ResourceAttribute(b *testing.B) { exp.Start(context.Background(), host) for i := 0; i < b.N; i++ { - m := pdata.NewMetrics() + m := pmetric.NewMetrics() rm := m.ResourceMetrics().AppendEmpty() attrs := rm.Resource().Attributes() @@ -954,14 +956,14 @@ func (m *mockComponent) Shutdown(context.Context) error { type mockMetricsExporter struct { mockComponent metricCount int32 - metrics []pdata.Metrics + metrics []pmetric.Metrics } func (m *mockMetricsExporter) Capabilities() consumer.Capabilities { return consumer.Capabilities{MutatesData: false} } -func (m *mockMetricsExporter) ConsumeMetrics(_ context.Context, metrics pdata.Metrics) error { +func (m *mockMetricsExporter) ConsumeMetrics(_ context.Context, metrics pmetric.Metrics) error { atomic.AddInt32(&m.metricCount, 1) m.metrics = append(m.metrics, metrics) return nil @@ -974,14 +976,14 @@ func (m *mockMetricsExporter) getMetricCount() int { type mockLogsExporter struct { mockComponent logCount int32 - logs []pdata.Logs + logs []plog.Logs } func (m *mockLogsExporter) Capabilities() consumer.Capabilities { return consumer.Capabilities{MutatesData: false} } -func (m *mockLogsExporter) ConsumeLogs(_ context.Context, logs pdata.Logs) error { +func (m *mockLogsExporter) ConsumeLogs(_ context.Context, logs plog.Logs) error { atomic.AddInt32(&m.logCount, 1) m.logs = append(m.logs, logs) return nil @@ -994,14 +996,14 @@ func (m *mockLogsExporter) getLogCount() int { type mockTracesExporter struct { mockComponent traceCount int32 - traces []pdata.Traces + traces []ptrace.Traces } func (m *mockTracesExporter) Capabilities() consumer.Capabilities { return consumer.Capabilities{MutatesData: false} } -func (m *mockTracesExporter) ConsumeTraces(_ context.Context, traces pdata.Traces) error { +func (m *mockTracesExporter) ConsumeTraces(_ context.Context, traces ptrace.Traces) error { 
atomic.AddInt32(&m.traceCount, 1) m.traces = append(m.traces, traces) return nil diff --git a/processor/routingprocessor/router.go b/processor/routingprocessor/router.go index 5950b616e10a..1077c8889216 100644 --- a/processor/routingprocessor/router.go +++ b/processor/routingprocessor/router.go @@ -21,14 +21,17 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap" ) // router routes logs, metrics and traces using the configured attributes and // attribute sources. // Upon routing it also groups the logs, metrics and spans into a joint upper level -// structure (pdata.Logs, pdata.Metrics and pdata.Traces respectively) in order +// structure (plog.Logs, pmetric.Metrics and ptrace.Traces respectively) in order // to not cause higher CPU usage in the exporters when exproting data (it's always // better to batch before exporting). type router struct { @@ -56,11 +59,11 @@ func newRouter(config Config, logger *zap.Logger) *router { } type routedMetrics struct { - metrics pdata.Metrics + metrics pmetric.Metrics exporters []component.MetricsExporter } -func (r *router) RouteMetrics(ctx context.Context, tm pdata.Metrics) []routedMetrics { +func (r *router) RouteMetrics(ctx context.Context, tm pmetric.Metrics) []routedMetrics { switch r.config.AttributeSource { case contextAttributeSource: fallthrough @@ -72,18 +75,18 @@ func (r *router) RouteMetrics(ctx context.Context, tm pdata.Metrics) []routedMet } } -func (r *router) removeRoutingAttribute(resource pdata.Resource) { +func (r *router) removeRoutingAttribute(resource pcommon.Resource) { resource.Attributes().Remove(r.config.FromAttribute) } -func (r *router) routeMetricsForResource(_ context.Context, tm pdata.Metrics) []routedMetrics { - // routingEntry is used to group pdata.ResourceMetrics that are routed to +func (r *router) routeMetricsForResource(_ context.Context, tm pmetric.Metrics) []routedMetrics { + // routingEntry is used to group pmetric.ResourceMetrics that are routed to // the same set of exporters. // This way we're not ending up with all the metrics split up which would cause // higher CPU usage. type routingEntry struct { exporters []component.MetricsExporter - resMetrics pdata.ResourceMetricsSlice + resMetrics pmetric.ResourceMetricsSlice } routingMap := map[string]routingEntry{} @@ -104,7 +107,7 @@ func (r *router) routeMetricsForResource(_ context.Context, tm pdata.Metrics) [] if rEntry, ok := routingMap[attrValue]; ok { resMetrics.MoveTo(rEntry.resMetrics.AppendEmpty()) } else { - new := pdata.NewResourceMetricsSlice() + new := pmetric.NewResourceMetricsSlice() resMetrics.MoveTo(new.AppendEmpty()) routingMap[attrValue] = routingEntry{ @@ -114,11 +117,11 @@ func (r *router) routeMetricsForResource(_ context.Context, tm pdata.Metrics) [] } } - // Now that we have all the ResourceMetrics grouped, let's create pdata.Metrics + // Now that we have all the ResourceMetrics grouped, let's create pmetric.Metrics // for each group and add it to the returned routedMetrics slice. 
ret := make([]routedMetrics, 0, len(routingMap)) for _, rEntry := range routingMap { - metrics := pdata.NewMetrics() + metrics := pmetric.NewMetrics() metrics.ResourceMetrics().EnsureCapacity(rEntry.resMetrics.Len()) rEntry.resMetrics.MoveAndAppendTo(metrics.ResourceMetrics()) @@ -131,7 +134,7 @@ func (r *router) routeMetricsForResource(_ context.Context, tm pdata.Metrics) [] return ret } -func (r *router) routeMetricsForContext(ctx context.Context, tm pdata.Metrics) routedMetrics { +func (r *router) routeMetricsForContext(ctx context.Context, tm pmetric.Metrics) routedMetrics { value := r.extractor.extractFromContext(ctx) exp, ok := r.metricsExporters[value] @@ -149,11 +152,11 @@ func (r *router) routeMetricsForContext(ctx context.Context, tm pdata.Metrics) r } type routedTraces struct { - traces pdata.Traces + traces ptrace.Traces exporters []component.TracesExporter } -func (r *router) RouteTraces(ctx context.Context, tr pdata.Traces) []routedTraces { +func (r *router) RouteTraces(ctx context.Context, tr ptrace.Traces) []routedTraces { switch r.config.AttributeSource { case contextAttributeSource: fallthrough @@ -165,14 +168,14 @@ func (r *router) RouteTraces(ctx context.Context, tr pdata.Traces) []routedTrace } } -func (r *router) routeTracesForResource(_ context.Context, tr pdata.Traces) []routedTraces { - // routingEntry is used to group pdata.ResourceSpans that are routed to +func (r *router) routeTracesForResource(_ context.Context, tr ptrace.Traces) []routedTraces { + // routingEntry is used to group ptrace.ResourceSpans that are routed to // the same set of exporters. // This way we're not ending up with all the logs split up which would cause // higher CPU usage. type routingEntry struct { exporters []component.TracesExporter - resSpans pdata.ResourceSpansSlice + resSpans ptrace.ResourceSpansSlice } routingMap := map[string]routingEntry{} @@ -193,7 +196,7 @@ func (r *router) routeTracesForResource(_ context.Context, tr pdata.Traces) []ro if rEntry, ok := routingMap[attrValue]; ok { resSpans.MoveTo(rEntry.resSpans.AppendEmpty()) } else { - new := pdata.NewResourceSpansSlice() + new := ptrace.NewResourceSpansSlice() resSpans.MoveTo(new.AppendEmpty()) routingMap[attrValue] = routingEntry{ @@ -203,11 +206,11 @@ func (r *router) routeTracesForResource(_ context.Context, tr pdata.Traces) []ro } } - // Now that we have all the ResourceSpans grouped, let's create pdata.Traces + // Now that we have all the ResourceSpans grouped, let's create ptrace.Traces // for each group and add it to the returned routedTraces slice. 
ret := make([]routedTraces, 0, len(routingMap)) for _, rEntry := range routingMap { - traces := pdata.NewTraces() + traces := ptrace.NewTraces() traces.ResourceSpans().EnsureCapacity(rEntry.resSpans.Len()) rEntry.resSpans.MoveAndAppendTo(traces.ResourceSpans()) @@ -220,7 +223,7 @@ func (r *router) routeTracesForResource(_ context.Context, tr pdata.Traces) []ro return ret } -func (r *router) routeTracesForContext(ctx context.Context, tr pdata.Traces) routedTraces { +func (r *router) routeTracesForContext(ctx context.Context, tr ptrace.Traces) routedTraces { value := r.extractor.extractFromContext(ctx) exp, ok := r.tracesExporters[value] @@ -238,11 +241,11 @@ func (r *router) routeTracesForContext(ctx context.Context, tr pdata.Traces) rou } type routedLogs struct { - logs pdata.Logs + logs plog.Logs exporters []component.LogsExporter } -func (r *router) RouteLogs(ctx context.Context, tl pdata.Logs) []routedLogs { +func (r *router) RouteLogs(ctx context.Context, tl plog.Logs) []routedLogs { switch r.config.AttributeSource { case contextAttributeSource: fallthrough @@ -254,14 +257,14 @@ func (r *router) RouteLogs(ctx context.Context, tl pdata.Logs) []routedLogs { } } -func (r *router) routeLogsForResource(_ context.Context, tl pdata.Logs) []routedLogs { - // routingEntry is used to group pdata.ResourceLogs that are routed to +func (r *router) routeLogsForResource(_ context.Context, tl plog.Logs) []routedLogs { + // routingEntry is used to group plog.ResourceLogs that are routed to // the same set of exporters. // This way we're not ending up with all the logs split up which would cause // higher CPU usage. type routingEntry struct { exporters []component.LogsExporter - resLogs pdata.ResourceLogsSlice + resLogs plog.ResourceLogsSlice } routingMap := map[string]routingEntry{} @@ -282,7 +285,7 @@ func (r *router) routeLogsForResource(_ context.Context, tl pdata.Logs) []routed if rEntry, ok := routingMap[attrValue]; ok { resLogs.MoveTo(rEntry.resLogs.AppendEmpty()) } else { - new := pdata.NewResourceLogsSlice() + new := plog.NewResourceLogsSlice() resLogs.MoveTo(new.AppendEmpty()) routingMap[attrValue] = routingEntry{ @@ -292,11 +295,11 @@ func (r *router) routeLogsForResource(_ context.Context, tl pdata.Logs) []routed } } - // Now that we have all the ResourceLogs grouped, let's create pdata.Logs + // Now that we have all the ResourceLogs grouped, let's create plog.Logs // for each group and add it to the returned routedLogs slice. 
ret := make([]routedLogs, 0, len(routingMap)) for _, rEntry := range routingMap { - logs := pdata.NewLogs() + logs := plog.NewLogs() logs.ResourceLogs().EnsureCapacity(rEntry.resLogs.Len()) rEntry.resLogs.MoveAndAppendTo(logs.ResourceLogs()) @@ -309,7 +312,7 @@ func (r *router) routeLogsForResource(_ context.Context, tl pdata.Logs) []routed return ret } -func (r *router) routeLogsForContext(ctx context.Context, tl pdata.Logs) routedLogs { +func (r *router) routeLogsForContext(ctx context.Context, tl plog.Logs) routedLogs { value := r.extractor.extractFromContext(ctx) exp, ok := r.logsExporters[value] diff --git a/processor/spanmetricsprocessor/config.go b/processor/spanmetricsprocessor/config.go index 92ad9d4b8a13..5be4cd7f4009 100644 --- a/processor/spanmetricsprocessor/config.go +++ b/processor/spanmetricsprocessor/config.go @@ -18,7 +18,7 @@ import ( "time" "go.opentelemetry.io/collector/config" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/service/featuregate" ) @@ -72,9 +72,9 @@ var dropSanitizationGate = featuregate.Gate{ // GetAggregationTemporality converts the string value given in the config into a MetricAggregationTemporality. // Returns cumulative, unless delta is correctly specified. -func (c Config) GetAggregationTemporality() pdata.MetricAggregationTemporality { +func (c Config) GetAggregationTemporality() pmetric.MetricAggregationTemporality { if c.AggregationTemporality == delta { - return pdata.MetricAggregationTemporalityDelta + return pmetric.MetricAggregationTemporalityDelta } - return pdata.MetricAggregationTemporalityCumulative + return pmetric.MetricAggregationTemporalityCumulative } diff --git a/processor/spanmetricsprocessor/config_test.go b/processor/spanmetricsprocessor/config_test.go index aa78f6725a67..315a7e852da0 100644 --- a/processor/spanmetricsprocessor/config_test.go +++ b/processor/spanmetricsprocessor/config_test.go @@ -24,7 +24,7 @@ import ( "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/exporter/otlpexporter" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/processor/batchprocessor" "go.opentelemetry.io/collector/receiver/otlpreceiver" "go.opentelemetry.io/collector/service/servicetest" @@ -115,11 +115,11 @@ func TestLoadConfig(t *testing.T) { func TestGetAggregationTemporality(t *testing.T) { cfg := &Config{AggregationTemporality: delta} - assert.Equal(t, pdata.MetricAggregationTemporalityDelta, cfg.GetAggregationTemporality()) + assert.Equal(t, pmetric.MetricAggregationTemporalityDelta, cfg.GetAggregationTemporality()) cfg = &Config{AggregationTemporality: cumulative} - assert.Equal(t, pdata.MetricAggregationTemporalityCumulative, cfg.GetAggregationTemporality()) + assert.Equal(t, pmetric.MetricAggregationTemporalityCumulative, cfg.GetAggregationTemporality()) cfg = &Config{} - assert.Equal(t, pdata.MetricAggregationTemporalityCumulative, cfg.GetAggregationTemporality()) + assert.Equal(t, pmetric.MetricAggregationTemporalityCumulative, cfg.GetAggregationTemporality()) } diff --git a/processor/spanmetricsprocessor/go.mod b/processor/spanmetricsprocessor/go.mod index f459ec8470b0..245d0e4ee94d 100644 --- a/processor/spanmetricsprocessor/go.mod +++ b/processor/spanmetricsprocessor/go.mod @@ -8,8 +8,9 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusexporter v0.48.0 
github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.48.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/zap v1.21.0 google.golang.org/grpc v1.45.0 ) @@ -19,7 +20,7 @@ require ( github.com/apache/thrift v0.16.0 // indirect github.com/benbjohnson/clock v1.3.0 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/cenkalti/backoff/v4 v4.1.2 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/felixge/httpsnoop v1.0.2 // indirect @@ -34,7 +35,7 @@ require ( github.com/hashicorp/hcl v1.0.0 // indirect github.com/jaegertracing/jaeger v1.32.0 // indirect github.com/klauspost/compress v1.15.1 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/magiconair/properties v1.8.6 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect @@ -106,3 +107,5 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver => ../../receiver/jaegerreceiver replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver => ../../receiver/prometheusreceiver + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/processor/spanmetricsprocessor/go.sum b/processor/spanmetricsprocessor/go.sum index f7c9a6bbaaef..3b5a8ff62bc2 100644 --- a/processor/spanmetricsprocessor/go.sum +++ b/processor/spanmetricsprocessor/go.sum @@ -52,8 +52,8 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= -github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -239,8 +239,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/klauspost/compress v1.14.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= 
github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b h1:iNjcivnc6lhbvJA3LD622NPrUponluJrBWPIwGG/3Bg= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -392,10 +392,12 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0/go.mod h1:SY9qHHUES6W3oZnO1H2W8NvsSovIoXRg/A1AH9px8+I= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= @@ -407,7 +409,7 @@ go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1 go.opentelemetry.io/otel/metric v0.28.0/go.mod h1:TrzsfQAmQaB1PDcdhBauLMk7nyyg9hm+GoQq/ekE9Iw= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.0/go.mod h1:qs7BrU5cZ8dXQHBGxHMOxwME/27YH2qEp4/+tZLLwJE= go.opentelemetry.io/otel/trace v1.6.1/go.mod h1:RkFRM1m0puWIq10oxImnGEduNBzxiN7TXluRBtE+5j0= go.opentelemetry.io/otel/trace v1.6.3 
h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= diff --git a/processor/spanmetricsprocessor/mocks/MetricsExporter.go b/processor/spanmetricsprocessor/mocks/MetricsExporter.go index 2e6e0f8742e2..3e379026c569 100644 --- a/processor/spanmetricsprocessor/mocks/MetricsExporter.go +++ b/processor/spanmetricsprocessor/mocks/MetricsExporter.go @@ -22,7 +22,7 @@ import ( mock "github.com/stretchr/testify/mock" component "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" - pdata "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" ) // MetricsExporter is an autogenerated mock type for the MetricsExporter type @@ -35,11 +35,11 @@ func (_m *MetricsExporter) Capabilities() consumer.Capabilities { } // ConsumeMetrics provides a mock function with given fields: ctx, md -func (_m *MetricsExporter) ConsumeMetrics(ctx context.Context, md pdata.Metrics) error { +func (_m *MetricsExporter) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error { ret := _m.Called(ctx, md) var r0 error - if rf, ok := ret.Get(0).(func(context.Context, pdata.Metrics) error); ok { + if rf, ok := ret.Get(0).(func(context.Context, pmetric.Metrics) error); ok { r0 = rf(ctx, md) } else { r0 = ret.Error(0) diff --git a/processor/spanmetricsprocessor/mocks/TracesConsumer.go b/processor/spanmetricsprocessor/mocks/TracesConsumer.go index 07f410b70547..9bafb2f97c75 100644 --- a/processor/spanmetricsprocessor/mocks/TracesConsumer.go +++ b/processor/spanmetricsprocessor/mocks/TracesConsumer.go @@ -21,7 +21,7 @@ import ( mock "github.com/stretchr/testify/mock" "go.opentelemetry.io/collector/consumer" - pdata "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/ptrace" ) // TracesConsumer is an autogenerated mock type for the TracesConsumer type @@ -34,11 +34,11 @@ func (_m *TracesConsumer) Capabilities() consumer.Capabilities { } // ConsumeTraces provides a mock function with given fields: ctx, td -func (_m *TracesConsumer) ConsumeTraces(ctx context.Context, td pdata.Traces) error { +func (_m *TracesConsumer) ConsumeTraces(ctx context.Context, td ptrace.Traces) error { ret := _m.Called(ctx, td) var r0 error - if rf, ok := ret.Get(0).(func(context.Context, pdata.Traces) error); ok { + if rf, ok := ret.Get(0).(func(context.Context, ptrace.Traces) error); ok { r0 = rf(ctx, td) } else { r0 = ret.Error(0) diff --git a/processor/spanmetricsprocessor/processor.go b/processor/spanmetricsprocessor/processor.go index f7de852dcb18..73c6e6c2e321 100644 --- a/processor/spanmetricsprocessor/processor.go +++ b/processor/spanmetricsprocessor/processor.go @@ -27,8 +27,10 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanmetricsprocessor/internal/cache" @@ -55,7 +57,7 @@ var ( ) type exemplarData struct { - traceID pdata.TraceID + traceID pcommon.TraceID value float64 } @@ -226,7 +228,7 @@ func (p *processorImp) Capabilities() consumer.Capabilities { // ConsumeTraces implements the consumer.Traces interface. 
// It aggregates the trace data to generate metrics, forwarding these metrics to the discovered metrics exporter. // The original input trace data will be forwarded to the next consumer, unmodified. -func (p *processorImp) ConsumeTraces(ctx context.Context, traces pdata.Traces) error { +func (p *processorImp) ConsumeTraces(ctx context.Context, traces ptrace.Traces) error { p.aggregateMetrics(traces) m, err := p.buildMetrics() @@ -247,8 +249,8 @@ func (p *processorImp) ConsumeTraces(ctx context.Context, traces pdata.Traces) e // buildMetrics collects the computed raw metrics data, builds the metrics object and // writes the raw metrics data into the metrics object. -func (p *processorImp) buildMetrics() (*pdata.Metrics, error) { - m := pdata.NewMetrics() +func (p *processorImp) buildMetrics() (*pmetric.Metrics, error) { + m := pmetric.NewMetrics() ilm := m.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty() ilm.Scope().SetName("spanmetricsprocessor") @@ -268,7 +270,7 @@ func (p *processorImp) buildMetrics() (*pdata.Metrics, error) { p.metricKeyToDimensions.RemoveEvictedItems() // If delta metrics, reset accumulated data - if p.config.GetAggregationTemporality() == pdata.MetricAggregationTemporalityDelta { + if p.config.GetAggregationTemporality() == pmetric.MetricAggregationTemporalityDelta { p.resetAccumulatedMetrics() } p.resetExemplarData() @@ -280,17 +282,17 @@ func (p *processorImp) buildMetrics() (*pdata.Metrics, error) { // collectLatencyMetrics collects the raw latency metrics, writing the data // into the given instrumentation library metrics. -func (p *processorImp) collectLatencyMetrics(ilm pdata.ScopeMetrics) error { +func (p *processorImp) collectLatencyMetrics(ilm pmetric.ScopeMetrics) error { for key := range p.latencyCount { mLatency := ilm.Metrics().AppendEmpty() - mLatency.SetDataType(pdata.MetricDataTypeHistogram) + mLatency.SetDataType(pmetric.MetricDataTypeHistogram) mLatency.SetName("latency") mLatency.Histogram().SetAggregationTemporality(p.config.GetAggregationTemporality()) - timestamp := pdata.NewTimestampFromTime(time.Now()) + timestamp := pcommon.NewTimestampFromTime(time.Now()) dpLatency := mLatency.Histogram().DataPoints().AppendEmpty() - dpLatency.SetStartTimestamp(pdata.NewTimestampFromTime(p.startTime)) + dpLatency.SetStartTimestamp(pcommon.NewTimestampFromTime(p.startTime)) dpLatency.SetTimestamp(timestamp) dpLatency.SetExplicitBounds(p.latencyBounds) dpLatency.SetBucketCounts(p.latencyBucketCounts[key]) @@ -312,17 +314,17 @@ func (p *processorImp) collectLatencyMetrics(ilm pdata.ScopeMetrics) error { // collectCallMetrics collects the raw call count metrics, writing the data // into the given instrumentation library metrics. 
-func (p *processorImp) collectCallMetrics(ilm pdata.ScopeMetrics) error { +func (p *processorImp) collectCallMetrics(ilm pmetric.ScopeMetrics) error { for key := range p.callSum { mCalls := ilm.Metrics().AppendEmpty() - mCalls.SetDataType(pdata.MetricDataTypeSum) + mCalls.SetDataType(pmetric.MetricDataTypeSum) mCalls.SetName("calls_total") mCalls.Sum().SetIsMonotonic(true) mCalls.Sum().SetAggregationTemporality(p.config.GetAggregationTemporality()) dpCalls := mCalls.Sum().DataPoints().AppendEmpty() - dpCalls.SetStartTimestamp(pdata.NewTimestampFromTime(p.startTime)) - dpCalls.SetTimestamp(pdata.NewTimestampFromTime(time.Now())) + dpCalls.SetStartTimestamp(pcommon.NewTimestampFromTime(p.startTime)) + dpCalls.SetTimestamp(pcommon.NewTimestampFromTime(time.Now())) dpCalls.SetIntVal(p.callSum[key]) dimensions, err := p.getDimensionsByMetricKey(key) @@ -337,9 +339,9 @@ func (p *processorImp) collectCallMetrics(ilm pdata.ScopeMetrics) error { } // getDimensionsByMetricKey gets dimensions from `metricKeyToDimensions` cache. -func (p *processorImp) getDimensionsByMetricKey(k metricKey) (*pdata.Map, error) { +func (p *processorImp) getDimensionsByMetricKey(k metricKey) (*pcommon.Map, error) { if item, ok := p.metricKeyToDimensions.Get(k); ok { - if attributeMap, ok := item.(pdata.Map); ok { + if attributeMap, ok := item.(pcommon.Map); ok { return &attributeMap, nil } return nil, fmt.Errorf("type assertion of metricKeyToDimensions attributes failed, the key is %q", k) @@ -352,7 +354,7 @@ func (p *processorImp) getDimensionsByMetricKey(k metricKey) (*pdata.Map, error) // Each metric is identified by a key that is built from the service name // and span metadata such as operation, kind, status_code and any additional // dimensions the user has configured. -func (p *processorImp) aggregateMetrics(traces pdata.Traces) { +func (p *processorImp) aggregateMetrics(traces ptrace.Traces) { for i := 0; i < traces.ResourceSpans().Len(); i++ { rspans := traces.ResourceSpans().At(i) r := rspans.Resource() @@ -366,7 +368,7 @@ func (p *processorImp) aggregateMetrics(traces pdata.Traces) { } } -func (p *processorImp) aggregateMetricsForServiceSpans(rspans pdata.ResourceSpans, serviceName string) { +func (p *processorImp) aggregateMetricsForServiceSpans(rspans ptrace.ResourceSpans, serviceName string) { ilsSlice := rspans.ScopeSpans() for j := 0; j < ilsSlice.Len(); j++ { ils := ilsSlice.At(j) @@ -378,7 +380,7 @@ func (p *processorImp) aggregateMetricsForServiceSpans(rspans pdata.ResourceSpan } } -func (p *processorImp) aggregateMetricsForSpan(serviceName string, span pdata.Span, resourceAttr pdata.Map) { +func (p *processorImp) aggregateMetricsForSpan(serviceName string, span ptrace.Span, resourceAttr pcommon.Map) { latencyInMilliseconds := float64(span.EndTimestamp()-span.StartTimestamp()) / float64(time.Millisecond.Nanoseconds()) // Binary search to find the latencyInMilliseconds bucket index. @@ -410,7 +412,7 @@ func (p *processorImp) resetAccumulatedMetrics() { } // updateLatencyExemplars sets the histogram exemplars for the given metric key and append the exemplar data. 
-func (p *processorImp) updateLatencyExemplars(key metricKey, value float64, traceID pdata.TraceID) { +func (p *processorImp) updateLatencyExemplars(key metricKey, value float64, traceID pcommon.TraceID) { if _, ok := p.latencyExemplarsData[key]; !ok { p.latencyExemplarsData[key] = []exemplarData{} } @@ -439,8 +441,8 @@ func (p *processorImp) updateLatencyMetrics(key metricKey, latency float64, inde p.latencyBucketCounts[key][index]++ } -func (p *processorImp) buildDimensionKVs(serviceName string, span pdata.Span, optionalDims []Dimension, resourceAttrs pdata.Map) pdata.Map { - dims := pdata.NewMap() +func (p *processorImp) buildDimensionKVs(serviceName string, span ptrace.Span, optionalDims []Dimension, resourceAttrs pcommon.Map) pcommon.Map { + dims := pcommon.NewMap() dims.UpsertString(serviceNameKey, serviceName) dims.UpsertString(operationKey, span.Name()) dims.UpsertString(spanKindKey, span.Kind().String()) @@ -467,7 +469,7 @@ func concatDimensionValue(metricKeyBuilder *strings.Builder, value string, prefi // or resource attributes. If the dimension exists in both, the span's attributes, being the most specific, takes precedence. // // The metric key is a simple concatenation of dimension values, delimited by a null character. -func buildKey(serviceName string, span pdata.Span, optionalDims []Dimension, resourceAttrs pdata.Map) metricKey { +func buildKey(serviceName string, span ptrace.Span, optionalDims []Dimension, resourceAttrs pcommon.Map) metricKey { var metricKeyBuilder strings.Builder concatDimensionValue(&metricKeyBuilder, serviceName, false) concatDimensionValue(&metricKeyBuilder, span.Name(), true) @@ -491,7 +493,7 @@ func buildKey(serviceName string, span pdata.Span, optionalDims []Dimension, res // // The ok flag indicates if a dimension value was fetched in order to differentiate // an empty string value from a state where no value was found. -func getDimensionValue(d Dimension, spanAttr pdata.Map, resourceAttr pdata.Map) (v pdata.Value, ok bool) { +func getDimensionValue(d Dimension, spanAttr pcommon.Map, resourceAttr pcommon.Map) (v pcommon.Value, ok bool) { // The more specific span attribute should take precedence. if attr, exists := spanAttr.Get(d.Name); exists { return attr, true @@ -501,7 +503,7 @@ func getDimensionValue(d Dimension, spanAttr pdata.Map, resourceAttr pdata.Map) } // Set the default if configured, otherwise this metric will have no value set for the dimension. if d.Default != nil { - return pdata.NewValueString(*d.Default), true + return pcommon.NewValueString(*d.Default), true } return v, ok } @@ -509,7 +511,7 @@ func getDimensionValue(d Dimension, spanAttr pdata.Map, resourceAttr pdata.Map) // cache the dimension key-value map for the metricKey if there is a cache miss. // This enables a lookup of the dimension key-value map when constructing the metric like so: // LabelsMap().InitFromMap(p.metricKeyToDimensions[key]) -func (p *processorImp) cache(serviceName string, span pdata.Span, k metricKey, resourceAttrs pdata.Map) { +func (p *processorImp) cache(serviceName string, span ptrace.Span, k metricKey, resourceAttrs pcommon.Map) { p.metricKeyToDimensions.ContainsOrAdd(k, p.buildDimensionKVs(serviceName, span, p.dimensions, resourceAttrs)) } @@ -549,8 +551,8 @@ func sanitizeRune(r rune) rune { } // setLatencyExemplars sets the histogram exemplars. 
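The processor.go hunks above are the core of the migration: the single `go.opentelemetry.io/collector/model/pdata` package is split into `pdata/pcommon` (shared types such as Map, Value, Timestamp, TraceID), `pdata/pmetric` (the metric model) and `pdata/ptrace` (the trace model). For orientation only, a minimal sketch of the new metric-building surface, restricted to calls that appear in the hunks above; the helper name `buildCallsMetric` and the hard-coded attribute value are illustrative, not part of the patch.

```go
package spanmetricsexample

import (
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"
)

// buildCallsMetric is an illustrative helper showing the post-migration API:
// pmetric owns the metric model, pcommon owns shared value types.
func buildCallsMetric(startTime time.Time, callCount int64) pmetric.Metrics {
	m := pmetric.NewMetrics()
	ilm := m.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty()
	ilm.Scope().SetName("spanmetricsprocessor")

	mCalls := ilm.Metrics().AppendEmpty()
	mCalls.SetDataType(pmetric.MetricDataTypeSum) // was pdata.MetricDataTypeSum
	mCalls.SetName("calls_total")
	mCalls.Sum().SetIsMonotonic(true)
	mCalls.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)

	dp := mCalls.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(pcommon.NewTimestampFromTime(startTime)) // was pdata.NewTimestampFromTime
	dp.SetTimestamp(pcommon.NewTimestampFromTime(time.Now()))
	dp.SetIntVal(callCount)
	dp.Attributes().UpsertString("service.name", "service-a") // illustrative attribute
	return m
}
```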
-func setLatencyExemplars(exemplarsData []exemplarData, timestamp pdata.Timestamp, exemplars pdata.ExemplarSlice) { - es := pdata.NewExemplarSlice() +func setLatencyExemplars(exemplarsData []exemplarData, timestamp pcommon.Timestamp, exemplars pmetric.ExemplarSlice) { + es := pmetric.NewExemplarSlice() es.EnsureCapacity(len(exemplarsData)) for _, ed := range exemplarsData { @@ -565,7 +567,7 @@ func setLatencyExemplars(exemplarsData []exemplarData, timestamp pdata.Timestamp exemplar.SetDoubleVal(value) exemplar.SetTimestamp(timestamp) - exemplar.FilteredAttributes().Insert(traceIDKey, pdata.NewValueString(traceID.HexString())) + exemplar.FilteredAttributes().Insert(traceIDKey, pcommon.NewValueString(traceID.HexString())) } es.CopyTo(exemplars) diff --git a/processor/spanmetricsprocessor/processor_test.go b/processor/spanmetricsprocessor/processor_test.go index c740026763ff..8004a4e153d7 100644 --- a/processor/spanmetricsprocessor/processor_test.go +++ b/processor/spanmetricsprocessor/processor_test.go @@ -29,8 +29,10 @@ import ( "go.opentelemetry.io/collector/config/configgrpc" "go.opentelemetry.io/collector/consumer/consumertest" "go.opentelemetry.io/collector/exporter/otlpexporter" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap/zaptest" "google.golang.org/grpc/metadata" @@ -65,7 +67,7 @@ type metricID struct { } type metricDataPoint interface { - Attributes() pdata.Map + Attributes() pcommon.Map } type serviceSpans struct { @@ -75,8 +77,8 @@ type serviceSpans struct { type span struct { operation string - kind pdata.SpanKind - statusCode pdata.StatusCode + kind ptrace.SpanKind + statusCode ptrace.StatusCode } func TestProcessorStart(t *testing.T) { @@ -224,34 +226,34 @@ func TestProcessorConsumeTraces(t *testing.T) { testcases := []struct { name string aggregationTemporality string - verifier func(t testing.TB, input pdata.Metrics) bool - traces []pdata.Traces + verifier func(t testing.TB, input pmetric.Metrics) bool + traces []ptrace.Traces }{ { name: "Test single consumption, three spans (Cumulative).", aggregationTemporality: cumulative, verifier: verifyConsumeMetricsInputCumulative, - traces: []pdata.Traces{buildSampleTrace()}, + traces: []ptrace.Traces{buildSampleTrace()}, }, { name: "Test single consumption, three spans (Delta).", aggregationTemporality: delta, verifier: verifyConsumeMetricsInputDelta, - traces: []pdata.Traces{buildSampleTrace()}, + traces: []ptrace.Traces{buildSampleTrace()}, }, { // More consumptions, should accumulate additively. name: "Test two consumptions (Cumulative).", aggregationTemporality: cumulative, verifier: verifyMultipleCumulativeConsumptions(), - traces: []pdata.Traces{buildSampleTrace(), buildSampleTrace()}, + traces: []ptrace.Traces{buildSampleTrace(), buildSampleTrace()}, }, { // More consumptions, should not accumulate. Therefore, end state should be the same as single consumption case. 
name: "Test two consumptions (Delta).", aggregationTemporality: delta, verifier: verifyConsumeMetricsInputDelta, - traces: []pdata.Traces{buildSampleTrace(), buildSampleTrace()}, + traces: []ptrace.Traces{buildSampleTrace(), buildSampleTrace()}, }, } @@ -262,7 +264,7 @@ func TestProcessorConsumeTraces(t *testing.T) { tcon := &mocks.TracesConsumer{} // Mocked metric exporter will perform validation on metrics, during p.ConsumeTraces() - mexp.On("ConsumeMetrics", mock.Anything, mock.MatchedBy(func(input pdata.Metrics) bool { + mexp.On("ConsumeMetrics", mock.Anything, mock.MatchedBy(func(input pmetric.Metrics) bool { return tc.verifier(t, input) })).Return(nil) tcon.On("ConsumeTraces", mock.Anything, mock.Anything).Return(nil) @@ -375,28 +377,28 @@ func newProcessorImp(mexp *mocks.MetricsExporter, tcon *mocks.TracesConsumer, de } // verifyConsumeMetricsInputCumulative expects one accumulation of metrics, and marked as cumulative -func verifyConsumeMetricsInputCumulative(t testing.TB, input pdata.Metrics) bool { - return verifyConsumeMetricsInput(t, input, pdata.MetricAggregationTemporalityCumulative, 1) +func verifyConsumeMetricsInputCumulative(t testing.TB, input pmetric.Metrics) bool { + return verifyConsumeMetricsInput(t, input, pmetric.MetricAggregationTemporalityCumulative, 1) } // verifyConsumeMetricsInputDelta expects one accumulation of metrics, and marked as delta -func verifyConsumeMetricsInputDelta(t testing.TB, input pdata.Metrics) bool { - return verifyConsumeMetricsInput(t, input, pdata.MetricAggregationTemporalityDelta, 1) +func verifyConsumeMetricsInputDelta(t testing.TB, input pmetric.Metrics) bool { + return verifyConsumeMetricsInput(t, input, pmetric.MetricAggregationTemporalityDelta, 1) } // verifyMultipleCumulativeConsumptions expects the amount of accumulations as kept track of by numCumulativeConsumptions. // numCumulativeConsumptions acts as a multiplier for the values, since the cumulative metrics are additive. -func verifyMultipleCumulativeConsumptions() func(t testing.TB, input pdata.Metrics) bool { +func verifyMultipleCumulativeConsumptions() func(t testing.TB, input pmetric.Metrics) bool { numCumulativeConsumptions := 0 - return func(t testing.TB, input pdata.Metrics) bool { + return func(t testing.TB, input pmetric.Metrics) bool { numCumulativeConsumptions++ - return verifyConsumeMetricsInput(t, input, pdata.MetricAggregationTemporalityCumulative, numCumulativeConsumptions) + return verifyConsumeMetricsInput(t, input, pmetric.MetricAggregationTemporalityCumulative, numCumulativeConsumptions) } } // verifyConsumeMetricsInput verifies the input of the ConsumeMetrics call from this processor. // This is the best point to verify the computed metrics from spans are as expected. -func verifyConsumeMetricsInput(t testing.TB, input pdata.Metrics, expectedTemporality pdata.MetricAggregationTemporality, numCumulativeConsumptions int) bool { +func verifyConsumeMetricsInput(t testing.TB, input pmetric.Metrics, expectedTemporality pmetric.MetricAggregationTemporality, numCumulativeConsumptions int) bool { require.Equal(t, 6, input.MetricCount(), "Should be 3 for each of call count and latency. 
Each group of 3 metrics is made of: "+ "service-a (server kind) -> service-a (client kind) -> service-b (service kind)", @@ -472,18 +474,18 @@ func verifyConsumeMetricsInput(t testing.TB, input pdata.Metrics, expectedTempor func verifyMetricLabels(dp metricDataPoint, t testing.TB, seenMetricIDs map[metricID]bool) { mID := metricID{} - wantDimensions := map[string]pdata.Value{ - stringAttrName: pdata.NewValueString("stringAttrValue"), - intAttrName: pdata.NewValueInt(99), - doubleAttrName: pdata.NewValueDouble(99.99), - boolAttrName: pdata.NewValueBool(true), - nullAttrName: pdata.NewValueEmpty(), - arrayAttrName: pdata.NewValueSlice(), - mapAttrName: pdata.NewValueMap(), - notInSpanAttrName0: pdata.NewValueString("defaultNotInSpanAttrVal"), - regionResourceAttrName: pdata.NewValueString(sampleRegion), + wantDimensions := map[string]pcommon.Value{ + stringAttrName: pcommon.NewValueString("stringAttrValue"), + intAttrName: pcommon.NewValueInt(99), + doubleAttrName: pcommon.NewValueDouble(99.99), + boolAttrName: pcommon.NewValueBool(true), + nullAttrName: pcommon.NewValueEmpty(), + arrayAttrName: pcommon.NewValueSlice(), + mapAttrName: pcommon.NewValueMap(), + notInSpanAttrName0: pcommon.NewValueString("defaultNotInSpanAttrVal"), + regionResourceAttrName: pcommon.NewValueString(sampleRegion), } - dp.Attributes().Range(func(k string, v pdata.Value) bool { + dp.Attributes().Range(func(k string, v pcommon.Value) bool { switch k { case serviceNameKey: mID.service = v.StringVal() @@ -512,8 +514,8 @@ func verifyMetricLabels(dp metricDataPoint, t testing.TB, seenMetricIDs map[metr // service-a/ping (server) -> // service-a/ping (client) -> // service-b/ping (server) -func buildSampleTrace() pdata.Traces { - traces := pdata.NewTraces() +func buildSampleTrace() ptrace.Traces { + traces := ptrace.NewTraces() initServiceSpans( serviceSpans{ @@ -521,13 +523,13 @@ func buildSampleTrace() pdata.Traces { spans: []span{ { operation: "/ping", - kind: pdata.SpanKindServer, - statusCode: pdata.StatusCodeOk, + kind: ptrace.SpanKindServer, + statusCode: ptrace.StatusCodeOk, }, { operation: "/ping", - kind: pdata.SpanKindClient, - statusCode: pdata.StatusCodeOk, + kind: ptrace.SpanKindClient, + statusCode: ptrace.StatusCodeOk, }, }, }, traces.ResourceSpans().AppendEmpty()) @@ -537,8 +539,8 @@ func buildSampleTrace() pdata.Traces { spans: []span{ { operation: "/ping", - kind: pdata.SpanKindServer, - statusCode: pdata.StatusCodeError, + kind: ptrace.SpanKindServer, + statusCode: ptrace.StatusCodeError, }, }, }, traces.ResourceSpans().AppendEmpty()) @@ -546,7 +548,7 @@ func buildSampleTrace() pdata.Traces { return traces } -func initServiceSpans(serviceSpans serviceSpans, spans pdata.ResourceSpans) { +func initServiceSpans(serviceSpans serviceSpans, spans ptrace.ResourceSpans) { if serviceSpans.serviceName != "" { spans.Resource().Attributes(). 
InsertString(conventions.AttributeServiceName, serviceSpans.serviceName) @@ -560,21 +562,21 @@ func initServiceSpans(serviceSpans serviceSpans, spans pdata.ResourceSpans) { } } -func initSpan(span span, s pdata.Span) { +func initSpan(span span, s ptrace.Span) { s.SetName(span.operation) s.SetKind(span.kind) s.Status().SetCode(span.statusCode) now := time.Now() - s.SetStartTimestamp(pdata.NewTimestampFromTime(now)) - s.SetEndTimestamp(pdata.NewTimestampFromTime(now.Add(sampleLatencyDuration))) + s.SetStartTimestamp(pcommon.NewTimestampFromTime(now)) + s.SetEndTimestamp(pcommon.NewTimestampFromTime(now.Add(sampleLatencyDuration))) s.Attributes().InsertString(stringAttrName, "stringAttrValue") s.Attributes().InsertInt(intAttrName, 99) s.Attributes().InsertDouble(doubleAttrName, 99.99) s.Attributes().InsertBool(boolAttrName, true) s.Attributes().InsertNull(nullAttrName) - s.Attributes().Insert(mapAttrName, pdata.NewValueMap()) - s.Attributes().Insert(arrayAttrName, pdata.NewValueSlice()) - s.SetTraceID(pdata.NewTraceID([16]byte{byte(42)})) + s.Attributes().Insert(mapAttrName, pcommon.NewValueMap()) + s.Attributes().Insert(arrayAttrName, pcommon.NewValueSlice()) + s.SetTraceID(pcommon.NewTraceID([16]byte{byte(42)})) } func newOTLPExporters(t *testing.T) (*otlpexporter.Config, component.MetricsExporter, component.TracesExporter) { @@ -594,13 +596,13 @@ func newOTLPExporters(t *testing.T) (*otlpexporter.Config, component.MetricsExpo } func TestBuildKeySameServiceOperationCharSequence(t *testing.T) { - span0 := pdata.NewSpan() + span0 := ptrace.NewSpan() span0.SetName("c") - k0 := buildKey("ab", span0, nil, pdata.NewMap()) + k0 := buildKey("ab", span0, nil, pcommon.NewMap()) - span1 := pdata.NewSpan() + span1 := ptrace.NewSpan() span1.SetName("bc") - k1 := buildKey("a", span1, nil, pdata.NewMap()) + k1 := buildKey("a", span1, nil, pcommon.NewMap()) assert.NotEqual(t, k0, k1) assert.Equal(t, metricKey("ab\u0000c\u0000SPAN_KIND_UNSPECIFIED\u0000STATUS_CODE_UNSET"), k0) @@ -669,9 +671,9 @@ func TestBuildKeyWithDimensions(t *testing.T) { }, } { t.Run(tc.name, func(t *testing.T) { - resAttr := pdata.NewMapFromRaw(tc.resourceAttrMap) - span0 := pdata.NewSpan() - pdata.NewMapFromRaw(tc.spanAttrMap).CopyTo(span0.Attributes()) + resAttr := pcommon.NewMapFromRaw(tc.resourceAttrMap) + span0 := ptrace.NewSpan() + pcommon.NewMapFromRaw(tc.spanAttrMap).CopyTo(span0.Attributes()) span0.SetName("c") k := buildKey("ab", span0, tc.optionalDims, resAttr) @@ -785,8 +787,8 @@ func TestSetLatencyExemplars(t *testing.T) { // ----- conditions ------------------------------------------------------- traces := buildSampleTrace() traceID := traces.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).TraceID() - exemplarSlice := pdata.NewExemplarSlice() - timestamp := pdata.NewTimestampFromTime(time.Now()) + exemplarSlice := pmetric.NewExemplarSlice() + timestamp := pcommon.NewTimestampFromTime(time.Now()) value := float64(42) ed := []exemplarData{{traceID: traceID, value: value}} diff --git a/processor/spanprocessor/go.mod b/processor/spanprocessor/go.mod index a49a4cda9f92..8b2cdb70af86 100644 --- a/processor/spanprocessor/go.mod +++ b/processor/spanprocessor/go.mod @@ -5,15 +5,16 @@ go 1.17 require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.48.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + 
go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 ) require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/text v0.2.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect @@ -33,3 +34,5 @@ require ( ) replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal => ../../internal/coreinternal + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/processor/spanprocessor/go.sum b/processor/spanprocessor/go.sum index a3baa0ae42d1..3b836d428bd2 100644 --- a/processor/spanprocessor/go.sum +++ b/processor/spanprocessor/go.sum @@ -15,7 +15,7 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= @@ -70,7 +70,6 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -100,8 +99,8 @@ github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqx github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= 
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -160,10 +159,12 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= @@ -203,7 +204,7 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -225,7 +226,7 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= 
+golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= diff --git a/processor/spanprocessor/span.go b/processor/spanprocessor/span.go index c28d56125f18..c583535d359d 100644 --- a/processor/spanprocessor/span.go +++ b/processor/spanprocessor/span.go @@ -21,7 +21,8 @@ import ( "strconv" "strings" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/processor/filterspan" ) @@ -80,7 +81,7 @@ func newSpanProcessor(config Config) (*spanProcessor, error) { return sp, nil } -func (sp *spanProcessor) processTraces(_ context.Context, td pdata.Traces) (pdata.Traces, error) { +func (sp *spanProcessor) processTraces(_ context.Context, td ptrace.Traces) (ptrace.Traces, error) { rss := td.ResourceSpans() for i := 0; i < rss.Len(); i++ { rs := rss.At(i) @@ -104,7 +105,7 @@ func (sp *spanProcessor) processTraces(_ context.Context, td pdata.Traces) (pdat return td, nil } -func (sp *spanProcessor) processFromAttributes(span pdata.Span) { +func (sp *spanProcessor) processFromAttributes(span ptrace.Span) { if len(sp.config.Rename.FromAttributes) == 0 { // There is FromAttributes rule. return @@ -142,13 +143,13 @@ func (sp *spanProcessor) processFromAttributes(span pdata.Span) { } switch attr.Type() { - case pdata.ValueTypeString: + case pcommon.ValueTypeString: sb.WriteString(attr.StringVal()) - case pdata.ValueTypeBool: + case pcommon.ValueTypeBool: sb.WriteString(strconv.FormatBool(attr.BoolVal())) - case pdata.ValueTypeDouble: + case pcommon.ValueTypeDouble: sb.WriteString(strconv.FormatFloat(attr.DoubleVal(), 'f', -1, 64)) - case pdata.ValueTypeInt: + case pcommon.ValueTypeInt: sb.WriteString(strconv.FormatInt(attr.IntVal(), 10)) default: sb.WriteString("") @@ -157,7 +158,7 @@ func (sp *spanProcessor) processFromAttributes(span pdata.Span) { span.SetName(sb.String()) } -func (sp *spanProcessor) processToAttributes(span pdata.Span) { +func (sp *spanProcessor) processToAttributes(span ptrace.Span) { if span.Name() == "" { // There is no span name to work on. 
return @@ -222,17 +223,17 @@ func (sp *spanProcessor) processToAttributes(span pdata.Span) { } } -func (sp *spanProcessor) processUpdateStatus(span pdata.Span) { +func (sp *spanProcessor) processUpdateStatus(span ptrace.Span) { cfg := sp.config.SetStatus if cfg != nil { if cfg.Code == statusCodeOk { - span.Status().SetCode(pdata.StatusCodeOk) + span.Status().SetCode(ptrace.StatusCodeOk) span.Status().SetMessage("") } else if cfg.Code == statusCodeError { - span.Status().SetCode(pdata.StatusCodeError) + span.Status().SetCode(ptrace.StatusCodeError) span.Status().SetMessage(cfg.Description) } else if cfg.Code == statusCodeUnset { - span.Status().SetCode(pdata.StatusCodeUnset) + span.Status().SetCode(ptrace.StatusCodeUnset) span.Status().SetMessage("") } } diff --git a/processor/spanprocessor/span_test.go b/processor/spanprocessor/span_test.go index 7965863b3e0c..54b92a0d182c 100644 --- a/processor/spanprocessor/span_test.go +++ b/processor/spanprocessor/span_test.go @@ -24,8 +24,9 @@ import ( "go.opentelemetry.io/collector/component/componenterror" "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/consumer/consumertest" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/processor/filterconfig" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/processor/filterset" @@ -78,15 +79,15 @@ func runIndividualTestCase(t *testing.T, tt testCase, tp component.TracesProcess }) } -func generateTraceData(serviceName, inputName string, attrs map[string]interface{}) pdata.Traces { - td := pdata.NewTraces() +func generateTraceData(serviceName, inputName string, attrs map[string]interface{}) ptrace.Traces { + td := ptrace.NewTraces() rs := td.ResourceSpans().AppendEmpty() if serviceName != "" { rs.Resource().Attributes().UpsertString(conventions.AttributeServiceName, serviceName) } span := rs.ScopeSpans().AppendEmpty().Spans().AppendEmpty() span.SetName(inputName) - pdata.NewMapFromRaw(attrs).CopyTo(span.Attributes()) + pcommon.NewMapFromRaw(attrs).CopyTo(span.Attributes()) span.Attributes().Sort() return td } @@ -95,15 +96,15 @@ func generateTraceData(serviceName, inputName string, attrs map[string]interface func TestSpanProcessor_NilEmptyData(t *testing.T) { type nilEmptyTestCase struct { name string - input pdata.Traces - output pdata.Traces + input ptrace.Traces + output ptrace.Traces } // TODO: Add test for "nil" Span. This needs support from data slices to allow to construct that. 
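For the span processor, the same split applies to test fixtures: traces are built with `ptrace.NewTraces` and raw attribute maps are converted with `pcommon.NewMapFromRaw` instead of the old `pdata` helpers. A small self-contained sketch of that pattern, mirroring the `generateTraceData` changes above; the helper name `newTestTrace` and the literal `"service.name"` key are illustrative (the real tests use the semconv constant).

```go
package spanexample

import (
	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/ptrace"
)

// newTestTrace builds a single-span ptrace.Traces and copies raw attributes
// onto the span via pcommon.NewMapFromRaw (formerly pdata.NewMapFromRaw).
func newTestTrace(serviceName, spanName string, attrs map[string]interface{}) ptrace.Traces {
	td := ptrace.NewTraces()
	rs := td.ResourceSpans().AppendEmpty()
	if serviceName != "" {
		rs.Resource().Attributes().UpsertString("service.name", serviceName)
	}
	span := rs.ScopeSpans().AppendEmpty().Spans().AppendEmpty()
	span.SetName(spanName)
	pcommon.NewMapFromRaw(attrs).CopyTo(span.Attributes())
	span.Attributes().Sort() // keep attribute order deterministic for comparisons
	return td
}
```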
testCases := []nilEmptyTestCase{ { name: "empty", - input: pdata.NewTraces(), - output: pdata.NewTraces(), + input: ptrace.NewTraces(), + output: ptrace.NewTraces(), }, { name: "one-empty-resource-spans", @@ -595,13 +596,13 @@ func TestSpanProcessor_skipSpan(t *testing.T) { } } -func generateTraceDataSetStatus(code pdata.StatusCode, description string, attrs map[string]interface{}) pdata.Traces { - td := pdata.NewTraces() +func generateTraceDataSetStatus(code ptrace.StatusCode, description string, attrs map[string]interface{}) ptrace.Traces { + td := ptrace.NewTraces() rs := td.ResourceSpans().AppendEmpty() span := rs.ScopeSpans().AppendEmpty().Spans().AppendEmpty() span.Status().SetCode(code) span.Status().SetMessage(description) - pdata.NewMapFromRaw(attrs).Sort().CopyTo(span.Attributes()) + pcommon.NewMapFromRaw(attrs).Sort().CopyTo(span.Attributes()) return td } @@ -617,11 +618,11 @@ func TestSpanProcessor_setStatusCode(t *testing.T) { require.Nil(t, err) require.NotNil(t, tp) - td := generateTraceDataSetStatus(pdata.StatusCodeUnset, "foobar", nil) + td := generateTraceDataSetStatus(ptrace.StatusCodeUnset, "foobar", nil) assert.NoError(t, tp.ConsumeTraces(context.Background(), td)) - assert.EqualValues(t, generateTraceDataSetStatus(pdata.StatusCodeError, "Set custom error message", nil), td) + assert.EqualValues(t, generateTraceDataSetStatus(ptrace.StatusCodeError, "Set custom error message", nil), td) } func TestSpanProcessor_setStatusCodeConditionally(t *testing.T) { @@ -647,21 +648,21 @@ func TestSpanProcessor_setStatusCodeConditionally(t *testing.T) { testCases := []struct { inputAttributes map[string]interface{} - inputStatusCode pdata.StatusCode - outputStatusCode pdata.StatusCode + inputStatusCode ptrace.StatusCode + outputStatusCode ptrace.StatusCode outputStatusDescription string }{ { // without attribiutes - should not apply rule and leave status code as it is - inputStatusCode: pdata.StatusCodeOk, - outputStatusCode: pdata.StatusCodeOk, + inputStatusCode: ptrace.StatusCodeOk, + outputStatusCode: ptrace.StatusCodeOk, }, { inputAttributes: map[string]interface{}{ "http.status_code": 400, }, - inputStatusCode: pdata.StatusCodeOk, - outputStatusCode: pdata.StatusCodeError, + inputStatusCode: ptrace.StatusCodeOk, + outputStatusCode: ptrace.StatusCodeError, outputStatusDescription: "custom error message", }, } diff --git a/processor/tailsamplingprocessor/go.mod b/processor/tailsamplingprocessor/go.mod index fdc50f5de7b1..495d9f0efa71 100644 --- a/processor/tailsamplingprocessor/go.mod +++ b/processor/tailsamplingprocessor/go.mod @@ -8,8 +8,8 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.48.0 github.com/stretchr/testify v1.7.1 go.opencensus.io v0.23.0 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/zap v1.21.0 ) @@ -17,14 +17,13 @@ require ( require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/text v0.2.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/pmezard/go-difflib v1.0.0 
// indirect github.com/rogpeppe/go-internal v1.6.1 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect @@ -35,3 +34,5 @@ require ( ) replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal => ../../internal/coreinternal + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/processor/tailsamplingprocessor/go.sum b/processor/tailsamplingprocessor/go.sum index dff1a39c2c8e..bba57db45540 100644 --- a/processor/tailsamplingprocessor/go.sum +++ b/processor/tailsamplingprocessor/go.sum @@ -15,7 +15,7 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= @@ -72,7 +72,6 @@ github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -102,8 +101,8 @@ github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqx github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -146,8 +145,6 @@ github.com/rogpeppe/go-internal 
v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTE github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -162,17 +159,17 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -206,7 +203,7 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d 
h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -228,7 +225,7 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= diff --git a/processor/tailsamplingprocessor/internal/idbatcher/id_batcher.go b/processor/tailsamplingprocessor/internal/idbatcher/id_batcher.go index c2d6f1dfbcce..5a482263a7d3 100644 --- a/processor/tailsamplingprocessor/internal/idbatcher/id_batcher.go +++ b/processor/tailsamplingprocessor/internal/idbatcher/id_batcher.go @@ -20,7 +20,7 @@ import ( "errors" "sync" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" ) var ( @@ -31,7 +31,7 @@ var ( ) // Batch is the type of batches held by the Batcher. -type Batch []pdata.TraceID +type Batch []pcommon.TraceID // Batcher behaves like a pipeline of batches that has a fixed number of batches in the pipe // and a new batch being built outside of the pipe. Items can be concurrently added to the batch @@ -44,7 +44,7 @@ type Batcher interface { // of limiting the growth of the current batch if appropriate for its scenario. It can // either call CloseCurrentAndTakeFirstBatch earlier or stop adding new items depending on what is // required by the scenario. - AddToCurrentBatch(id pdata.TraceID) + AddToCurrentBatch(id pcommon.TraceID) // CloseCurrentAndTakeFirstBatch takes the batch at the front of the pipe, and moves the current // batch to the end of the pipe, creating a new batch to receive new items. This operation should // be atomic. @@ -59,8 +59,8 @@ type Batcher interface { var _ Batcher = (*batcher)(nil) type batcher struct { - pendingIds chan pdata.TraceID // Channel for the ids to be added to the next batch. - batches chan Batch // Channel with already captured batches. + pendingIds chan pcommon.TraceID // Channel for the ids to be added to the next batch. + batches chan Batch // Channel with already captured batches. // cbMutex protects the currentBatch storing ids. 
cbMutex sync.Mutex @@ -93,7 +93,7 @@ func New(numBatches, newBatchesInitialCapacity, batchChannelSize uint64) (Batche } batcher := &batcher{ - pendingIds: make(chan pdata.TraceID, batchChannelSize), + pendingIds: make(chan pcommon.TraceID, batchChannelSize), batches: batches, currentBatch: make(Batch, 0, newBatchesInitialCapacity), newBatchesInitialCapacity: newBatchesInitialCapacity, @@ -114,7 +114,7 @@ func New(numBatches, newBatchesInitialCapacity, batchChannelSize uint64) (Batche return batcher, nil } -func (b *batcher) AddToCurrentBatch(id pdata.TraceID) { +func (b *batcher) AddToCurrentBatch(id pcommon.TraceID) { b.pendingIds <- id } diff --git a/processor/tailsamplingprocessor/internal/idbatcher/id_batcher_test.go b/processor/tailsamplingprocessor/internal/idbatcher/id_batcher_test.go index 83cca9ef6e53..b31c054fd16f 100644 --- a/processor/tailsamplingprocessor/internal/idbatcher/id_batcher_test.go +++ b/processor/tailsamplingprocessor/internal/idbatcher/id_batcher_test.go @@ -23,7 +23,7 @@ import ( "time" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" ) func TestBatcherNew(t *testing.T) { @@ -119,7 +119,7 @@ func concurrencyTest(t *testing.T, numBatches, newBatchesInitialCapacity, batchC wg := &sync.WaitGroup{} for i := 0; i < len(ids); i++ { wg.Add(1) - go func(id pdata.TraceID) { + go func(id pcommon.TraceID) { batcher.AddToCurrentBatch(id) wg.Done() }(ids[i]) @@ -151,13 +151,13 @@ func concurrencyTest(t *testing.T, numBatches, newBatchesInitialCapacity, batchC } } -func generateSequentialIds(numIds uint64) []pdata.TraceID { - ids := make([]pdata.TraceID, numIds) +func generateSequentialIds(numIds uint64) []pcommon.TraceID { + ids := make([]pcommon.TraceID, numIds) for i := uint64(0); i < numIds; i++ { traceID := [16]byte{} binary.BigEndian.PutUint64(traceID[:8], 0) binary.BigEndian.PutUint64(traceID[8:], i) - ids[i] = pdata.NewTraceID(traceID) + ids[i] = pcommon.NewTraceID(traceID) } return ids } diff --git a/processor/tailsamplingprocessor/internal/sampling/always_sample.go b/processor/tailsamplingprocessor/internal/sampling/always_sample.go index a6858e2df176..065345678766 100644 --- a/processor/tailsamplingprocessor/internal/sampling/always_sample.go +++ b/processor/tailsamplingprocessor/internal/sampling/always_sample.go @@ -15,7 +15,7 @@ package sampling // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/sampling" import ( - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" "go.uber.org/zap" ) @@ -33,7 +33,7 @@ func NewAlwaysSample(logger *zap.Logger) PolicyEvaluator { } // Evaluate looks at the trace data and returns a corresponding SamplingDecision. 
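In the tail sampling processor, trace IDs handed to the id batcher are now `pcommon.TraceID` values rather than `pdata.TraceID`, but they are still constructed from a raw `[16]byte`. A hedged sketch of the ID construction used by `generateSequentialIds` above (`makeTraceID` is an illustrative name); the resulting IDs are what `Batcher.AddToCurrentBatch` accepts.

```go
package idbatcherexample

import (
	"encoding/binary"

	"go.opentelemetry.io/collector/pdata/pcommon"
)

// makeTraceID builds a pcommon.TraceID (formerly pdata.TraceID) from a
// sequential counter, matching the pattern in the batcher tests above.
func makeTraceID(n uint64) pcommon.TraceID {
	var raw [16]byte
	binary.BigEndian.PutUint64(raw[8:], n) // low 8 bytes carry the counter
	return pcommon.NewTraceID(raw)
}
```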
-func (as *alwaysSample) Evaluate(pdata.TraceID, *TraceData) (Decision, error) { +func (as *alwaysSample) Evaluate(pcommon.TraceID, *TraceData) (Decision, error) { as.logger.Debug("Evaluating spans in always-sample filter") return Sampled, nil } diff --git a/processor/tailsamplingprocessor/internal/sampling/always_sample_test.go b/processor/tailsamplingprocessor/internal/sampling/always_sample_test.go index cb040afc43d6..18d48e9eb175 100644 --- a/processor/tailsamplingprocessor/internal/sampling/always_sample_test.go +++ b/processor/tailsamplingprocessor/internal/sampling/always_sample_test.go @@ -18,14 +18,14 @@ import ( "testing" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" "go.uber.org/zap" ) func TestEvaluate_AlwaysSample(t *testing.T) { filter := NewAlwaysSample(zap.NewNop()) - decision, err := filter.Evaluate(pdata.NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 16}), newTraceStringAttrs(pdata.NewMap(), "example", "value")) + decision, err := filter.Evaluate(pcommon.NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16}), newTraceStringAttrs(pcommon.NewMap(), "example", "value")) assert.Nil(t, err) assert.Equal(t, decision, Sampled) } diff --git a/processor/tailsamplingprocessor/internal/sampling/and.go b/processor/tailsamplingprocessor/internal/sampling/and.go index 0772304f9482..682720ecb1dc 100644 --- a/processor/tailsamplingprocessor/internal/sampling/and.go +++ b/processor/tailsamplingprocessor/internal/sampling/and.go @@ -15,7 +15,7 @@ package sampling // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/sampling" import ( - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" "go.uber.org/zap" ) @@ -37,7 +37,7 @@ func NewAnd( } // Evaluate looks at the trace data and returns a corresponding SamplingDecision. -func (c *And) Evaluate(traceID pdata.TraceID, trace *TraceData) (Decision, error) { +func (c *And) Evaluate(traceID pcommon.TraceID, trace *TraceData) (Decision, error) { // The policy iterates over all sub-policies and returns Sampled if all sub-policies returned a Sampled Decision. // If any subpolicy returns NotSampled, it returns NotSampled Decision. for _, sub := range c.subpolicies { @@ -55,6 +55,6 @@ func (c *And) Evaluate(traceID pdata.TraceID, trace *TraceData) (Decision, error // OnDroppedSpans is called when the trace needs to be dropped, due to memory // pressure, before the decision_wait time has been reached. 
-func (c *And) OnDroppedSpans(pdata.TraceID, *TraceData) (Decision, error) { +func (c *And) OnDroppedSpans(pcommon.TraceID, *TraceData) (Decision, error) { return Sampled, nil } diff --git a/processor/tailsamplingprocessor/internal/sampling/and_test.go b/processor/tailsamplingprocessor/internal/sampling/and_test.go index 3b6da2f0ae71..d2d290b100bb 100644 --- a/processor/tailsamplingprocessor/internal/sampling/and_test.go +++ b/processor/tailsamplingprocessor/internal/sampling/and_test.go @@ -19,7 +19,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap" ) @@ -32,17 +33,17 @@ func TestAndEvaluatorNotSampled(t *testing.T) { and := NewAnd(zap.NewNop(), []PolicyEvaluator{n1, n2}) - traces := pdata.NewTraces() + traces := ptrace.NewTraces() rs := traces.ResourceSpans().AppendEmpty() ils := rs.ScopeSpans().AppendEmpty() span := ils.Spans().AppendEmpty() - span.Status().SetCode(pdata.StatusCodeError) - span.SetTraceID(pdata.NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16})) - span.SetSpanID(pdata.NewSpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8})) + span.Status().SetCode(ptrace.StatusCodeError) + span.SetTraceID(pcommon.NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16})) + span.SetSpanID(pcommon.NewSpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8})) trace := &TraceData{ - ReceivedBatches: []pdata.Traces{traces}, + ReceivedBatches: []ptrace.Traces{traces}, } decision, err := and.Evaluate(traceID, trace) require.NoError(t, err, "Failed to evaluate and policy: %v", err) @@ -59,18 +60,18 @@ func TestAndEvaluatorSampled(t *testing.T) { and := NewAnd(zap.NewNop(), []PolicyEvaluator{n1, n2}) - traces := pdata.NewTraces() + traces := ptrace.NewTraces() rs := traces.ResourceSpans().AppendEmpty() ils := rs.ScopeSpans().AppendEmpty() span := ils.Spans().AppendEmpty() span.Attributes().InsertString("attribute_name", "attribute_value") - span.Status().SetCode(pdata.StatusCodeError) - span.SetTraceID(pdata.NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16})) - span.SetSpanID(pdata.NewSpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8})) + span.Status().SetCode(ptrace.StatusCodeError) + span.SetTraceID(pcommon.NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16})) + span.SetSpanID(pcommon.NewSpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8})) trace := &TraceData{ - ReceivedBatches: []pdata.Traces{traces}, + ReceivedBatches: []ptrace.Traces{traces}, } decision, err := and.Evaluate(traceID, trace) require.NoError(t, err, "Failed to evaluate and policy: %v", err) diff --git a/processor/tailsamplingprocessor/internal/sampling/composite.go b/processor/tailsamplingprocessor/internal/sampling/composite.go index 8977536bb5e2..98047229bdcf 100644 --- a/processor/tailsamplingprocessor/internal/sampling/composite.go +++ b/processor/tailsamplingprocessor/internal/sampling/composite.go @@ -15,7 +15,7 @@ package sampling // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/sampling" import ( - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" "go.uber.org/zap" ) @@ -85,7 +85,7 @@ func NewComposite( } // Evaluate looks at the trace data and returns a corresponding SamplingDecision. 
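The sampling policy evaluators keep their shape apart from the type swap: `Evaluate` now takes a `pcommon.TraceID` and a `*TraceData` whose `ReceivedBatches` holds `ptrace.Traces`. A minimal test-style sketch, assuming it sits alongside the sampling package's own tests; the test name is illustrative and not part of the patch.

```go
package sampling

import (
	"testing"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/ptrace"
	"go.uber.org/zap"
)

// TestEvaluateMigrationSketch exercises a policy evaluator with the
// post-migration input types, following the pattern in and_test.go above.
func TestEvaluateMigrationSketch(t *testing.T) {
	traces := ptrace.NewTraces()
	span := traces.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty().Spans().AppendEmpty()
	span.SetTraceID(pcommon.NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}))
	span.SetSpanID(pcommon.NewSpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8}))
	span.Status().SetCode(ptrace.StatusCodeError)

	trace := &TraceData{ReceivedBatches: []ptrace.Traces{traces}}
	decision, err := NewAlwaysSample(zap.NewNop()).Evaluate(span.TraceID(), trace)
	if err != nil || decision != Sampled {
		t.Fatalf("expected Sampled decision, got %v (err: %v)", decision, err)
	}
}
```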
-func (c *Composite) Evaluate(traceID pdata.TraceID, trace *TraceData) (Decision, error) { +func (c *Composite) Evaluate(traceID pcommon.TraceID, trace *TraceData) (Decision, error) { // Rate limiting works by counting spans that are sampled during each 1 second // time period. Until the total number of spans during a particular second // exceeds the allocated number of spans-per-second the traces are sampled, @@ -136,7 +136,7 @@ func (c *Composite) Evaluate(traceID pdata.TraceID, trace *TraceData) (Decision, // OnDroppedSpans is called when the trace needs to be dropped, due to memory // pressure, before the decision_wait time has been reached. -func (c *Composite) OnDroppedSpans(pdata.TraceID, *TraceData) (Decision, error) { +func (c *Composite) OnDroppedSpans(pcommon.TraceID, *TraceData) (Decision, error) { // Here we have a number of possible solutions: // 1. Random sample traces based on maxTotalSPS. // 2. Perform full composite sampling logic by calling Composite.Evaluate(), essentially diff --git a/processor/tailsamplingprocessor/internal/sampling/composite_test.go b/processor/tailsamplingprocessor/internal/sampling/composite_test.go index cdaa18676a1d..ba592515a0f3 100644 --- a/processor/tailsamplingprocessor/internal/sampling/composite_test.go +++ b/processor/tailsamplingprocessor/internal/sampling/composite_test.go @@ -19,7 +19,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap" ) @@ -31,30 +32,30 @@ func (f FakeTimeProvider) getCurSecond() int64 { return f.second } -var traceID = pdata.NewTraceID([16]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x96, 0x9A, 0x89, 0x55, 0x57, 0x1A, 0x3F}) +var traceID = pcommon.NewTraceID([16]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x96, 0x9A, 0x89, 0x55, 0x57, 0x1A, 0x3F}) func createTrace() *TraceData { trace := &TraceData{SpanCount: 1} return trace } -func newTraceID() pdata.TraceID { +func newTraceID() pcommon.TraceID { r := [16]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x96, 0x9A, 0x89, 0x55, 0x57, 0x1A, 0x3F} - return pdata.NewTraceID(r) + return pcommon.NewTraceID(r) } -func newTraceWithKV(traceID pdata.TraceID, key string, val int64) *TraceData { - var traceBatches []pdata.Traces - traces := pdata.NewTraces() +func newTraceWithKV(traceID pcommon.TraceID, key string, val int64) *TraceData { + var traceBatches []ptrace.Traces + traces := ptrace.NewTraces() rs := traces.ResourceSpans().AppendEmpty() ils := rs.ScopeSpans().AppendEmpty() span := ils.Spans().AppendEmpty() span.SetTraceID(traceID) - span.SetSpanID(pdata.NewSpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8})) - span.SetStartTimestamp(pdata.NewTimestampFromTime( + span.SetSpanID(pcommon.NewSpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8})) + span.SetStartTimestamp(pcommon.NewTimestampFromTime( time.Date(2020, 1, 1, 12, 0, 0, 0, time.UTC), )) - span.SetEndTimestamp(pdata.NewTimestampFromTime( + span.SetEndTimestamp(pcommon.NewTimestampFromTime( time.Date(2020, 1, 1, 12, 0, 16, 0, time.UTC), )) span.Attributes().InsertInt(key, val) diff --git a/processor/tailsamplingprocessor/internal/sampling/latency.go b/processor/tailsamplingprocessor/internal/sampling/latency.go index c42f1631bfa4..0021e3c012d4 100644 --- a/processor/tailsamplingprocessor/internal/sampling/latency.go +++ b/processor/tailsamplingprocessor/internal/sampling/latency.go @@ -15,7 
+15,8 @@ package sampling // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/sampling" import ( - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap" ) @@ -35,17 +36,17 @@ func NewLatency(logger *zap.Logger, thresholdMs int64) PolicyEvaluator { } // Evaluate looks at the trace data and returns a corresponding SamplingDecision. -func (l *latency) Evaluate(_ pdata.TraceID, traceData *TraceData) (Decision, error) { +func (l *latency) Evaluate(_ pcommon.TraceID, traceData *TraceData) (Decision, error) { l.logger.Debug("Evaluating spans in latency filter") traceData.Lock() batches := traceData.ReceivedBatches traceData.Unlock() - var minTime pdata.Timestamp - var maxTime pdata.Timestamp + var minTime pcommon.Timestamp + var maxTime pcommon.Timestamp - return hasSpanWithCondition(batches, func(span pdata.Span) bool { + return hasSpanWithCondition(batches, func(span ptrace.Span) bool { if minTime == 0 || span.StartTimestamp() < minTime { minTime = span.StartTimestamp() } diff --git a/processor/tailsamplingprocessor/internal/sampling/latency_test.go b/processor/tailsamplingprocessor/internal/sampling/latency_test.go index 56d26b469f63..7a3b882309cc 100644 --- a/processor/tailsamplingprocessor/internal/sampling/latency_test.go +++ b/processor/tailsamplingprocessor/internal/sampling/latency_test.go @@ -19,14 +19,15 @@ import ( "time" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap" ) func TestEvaluate_Latency(t *testing.T) { filter := NewLatency(zap.NewNop(), 5000) - traceID := pdata.NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}) + traceID := pcommon.NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}) now := time.Now() cases := []struct { @@ -86,17 +87,17 @@ type spanWithTimeAndDuration struct { } func newTraceWithSpans(spans []spanWithTimeAndDuration) *TraceData { - var traceBatches []pdata.Traces - traces := pdata.NewTraces() + var traceBatches []ptrace.Traces + traces := ptrace.NewTraces() rs := traces.ResourceSpans().AppendEmpty() ils := rs.ScopeSpans().AppendEmpty() for _, s := range spans { span := ils.Spans().AppendEmpty() - span.SetTraceID(pdata.NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16})) - span.SetSpanID(pdata.NewSpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8})) - span.SetStartTimestamp(pdata.NewTimestampFromTime(s.StartTime)) - span.SetEndTimestamp(pdata.NewTimestampFromTime(s.StartTime.Add(s.Duration))) + span.SetTraceID(pcommon.NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16})) + span.SetSpanID(pcommon.NewSpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8})) + span.SetStartTimestamp(pcommon.NewTimestampFromTime(s.StartTime)) + span.SetEndTimestamp(pcommon.NewTimestampFromTime(s.StartTime.Add(s.Duration))) } traceBatches = append(traceBatches, traces) diff --git a/processor/tailsamplingprocessor/internal/sampling/numeric_tag_filter.go b/processor/tailsamplingprocessor/internal/sampling/numeric_tag_filter.go index 086e26d842f1..0b2b0230f76d 100644 --- a/processor/tailsamplingprocessor/internal/sampling/numeric_tag_filter.go +++ b/processor/tailsamplingprocessor/internal/sampling/numeric_tag_filter.go @@ -15,7 +15,8 @@ package sampling // import 
"github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/sampling" import ( - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap" ) @@ -39,12 +40,12 @@ func NewNumericAttributeFilter(logger *zap.Logger, key string, minValue, maxValu } // Evaluate looks at the trace data and returns a corresponding SamplingDecision. -func (naf *numericAttributeFilter) Evaluate(_ pdata.TraceID, trace *TraceData) (Decision, error) { +func (naf *numericAttributeFilter) Evaluate(_ pcommon.TraceID, trace *TraceData) (Decision, error) { trace.Lock() batches := trace.ReceivedBatches trace.Unlock() - return hasSpanWithCondition(batches, func(span pdata.Span) bool { + return hasSpanWithCondition(batches, func(span ptrace.Span) bool { if v, ok := span.Attributes().Get(naf.key); ok { value := v.IntVal() if value >= naf.minValue && value <= naf.maxValue { diff --git a/processor/tailsamplingprocessor/internal/sampling/numeric_tag_filter_test.go b/processor/tailsamplingprocessor/internal/sampling/numeric_tag_filter_test.go index da183cf2b402..08bcc4cd0755 100644 --- a/processor/tailsamplingprocessor/internal/sampling/numeric_tag_filter_test.go +++ b/processor/tailsamplingprocessor/internal/sampling/numeric_tag_filter_test.go @@ -20,7 +20,8 @@ import ( "github.com/google/uuid" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap" ) @@ -67,7 +68,7 @@ func TestNumericTagFilter(t *testing.T) { for _, c := range cases { t.Run(c.Desc, func(t *testing.T) { u, _ := uuid.NewRandom() - decision, err := filter.Evaluate(pdata.NewTraceID(u), c.Trace) + decision, err := filter.Evaluate(pcommon.NewTraceID(u), c.Trace) assert.NoError(t, err) assert.Equal(t, decision, c.Decision) }) @@ -75,14 +76,14 @@ func TestNumericTagFilter(t *testing.T) { } func newTraceIntAttrs(nodeAttrs map[string]interface{}, spanAttrKey string, spanAttrValue int64) *TraceData { - var traceBatches []pdata.Traces - traces := pdata.NewTraces() + var traceBatches []ptrace.Traces + traces := ptrace.NewTraces() rs := traces.ResourceSpans().AppendEmpty() - pdata.NewMapFromRaw(nodeAttrs).CopyTo(rs.Resource().Attributes()) + pcommon.NewMapFromRaw(nodeAttrs).CopyTo(rs.Resource().Attributes()) ils := rs.ScopeSpans().AppendEmpty() span := ils.Spans().AppendEmpty() - span.SetTraceID(pdata.NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16})) - span.SetSpanID(pdata.NewSpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8})) + span.SetTraceID(pcommon.NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16})) + span.SetSpanID(pcommon.NewSpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8})) span.Attributes().InsertInt(spanAttrKey, spanAttrValue) traceBatches = append(traceBatches, traces) return &TraceData{ diff --git a/processor/tailsamplingprocessor/internal/sampling/policy.go b/processor/tailsamplingprocessor/internal/sampling/policy.go index 78ad092b51ff..b357a40dd6d7 100644 --- a/processor/tailsamplingprocessor/internal/sampling/policy.go +++ b/processor/tailsamplingprocessor/internal/sampling/policy.go @@ -18,7 +18,8 @@ import ( "sync" "time" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" ) // TraceData stores the sampling related trace data. 
@@ -33,7 +34,7 @@ type TraceData struct { // SpanCount track the number of spans on the trace. SpanCount int64 // ReceivedBatches stores all the batches received for the trace. - ReceivedBatches []pdata.Traces + ReceivedBatches []ptrace.Traces } // Decision gives the status of sampling decision. @@ -67,5 +68,5 @@ const ( // which makes a sampling decision for a given trace when requested. type PolicyEvaluator interface { // Evaluate looks at the trace data and returns a corresponding SamplingDecision. - Evaluate(traceID pdata.TraceID, trace *TraceData) (Decision, error) + Evaluate(traceID pcommon.TraceID, trace *TraceData) (Decision, error) } diff --git a/processor/tailsamplingprocessor/internal/sampling/probabilistic.go b/processor/tailsamplingprocessor/internal/sampling/probabilistic.go index 590597fec203..0e997653ee9a 100644 --- a/processor/tailsamplingprocessor/internal/sampling/probabilistic.go +++ b/processor/tailsamplingprocessor/internal/sampling/probabilistic.go @@ -19,7 +19,7 @@ import ( "math" "math/big" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" "go.uber.org/zap" ) @@ -51,7 +51,7 @@ func NewProbabilisticSampler(logger *zap.Logger, hashSalt string, samplingPercen } // Evaluate looks at the trace data and returns a corresponding SamplingDecision. -func (s *probabilisticSampler) Evaluate(traceID pdata.TraceID, _ *TraceData) (Decision, error) { +func (s *probabilisticSampler) Evaluate(traceID pcommon.TraceID, _ *TraceData) (Decision, error) { s.logger.Debug("Evaluating spans in probabilistic filter") traceIDBytes := traceID.Bytes() diff --git a/processor/tailsamplingprocessor/internal/sampling/probabilistic_test.go b/processor/tailsamplingprocessor/internal/sampling/probabilistic_test.go index 8eed286d38a5..e0c1420445f0 100644 --- a/processor/tailsamplingprocessor/internal/sampling/probabilistic_test.go +++ b/processor/tailsamplingprocessor/internal/sampling/probabilistic_test.go @@ -20,7 +20,7 @@ import ( "testing" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" "go.uber.org/zap" ) @@ -82,7 +82,7 @@ func TestProbabilisticSampling(t *testing.T) { sampled := 0 for _, traceID := range genRandomTraceIDs(traceCount) { - trace := newTraceStringAttrs(pdata.NewMap(), "example", "value") + trace := newTraceStringAttrs(pcommon.NewMap(), "example", "value") decision, err := probabilisticSampler.Evaluate(traceID, trace) assert.NoError(t, err) @@ -100,14 +100,14 @@ func TestProbabilisticSampling(t *testing.T) { } } -func genRandomTraceIDs(num int) (ids []pdata.TraceID) { +func genRandomTraceIDs(num int) (ids []pcommon.TraceID) { r := rand.New(rand.NewSource(1)) - ids = make([]pdata.TraceID, 0, num) + ids = make([]pcommon.TraceID, 0, num) for i := 0; i < num; i++ { traceID := [16]byte{} binary.BigEndian.PutUint64(traceID[:8], r.Uint64()) binary.BigEndian.PutUint64(traceID[8:], r.Uint64()) - ids = append(ids, pdata.NewTraceID(traceID)) + ids = append(ids, pcommon.NewTraceID(traceID)) } return ids } diff --git a/processor/tailsamplingprocessor/internal/sampling/rate_limiting.go b/processor/tailsamplingprocessor/internal/sampling/rate_limiting.go index a277c4634cce..9308d6bd3dd2 100644 --- a/processor/tailsamplingprocessor/internal/sampling/rate_limiting.go +++ b/processor/tailsamplingprocessor/internal/sampling/rate_limiting.go @@ -17,7 +17,7 @@ package sampling // import "github.com/open-telemetry/opentelemetry-collector-co import ( "time" - 
"go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" "go.uber.org/zap" ) @@ -39,7 +39,7 @@ func NewRateLimiting(logger *zap.Logger, spansPerSecond int64) PolicyEvaluator { } // Evaluate looks at the trace data and returns a corresponding SamplingDecision. -func (r *rateLimiting) Evaluate(_ pdata.TraceID, trace *TraceData) (Decision, error) { +func (r *rateLimiting) Evaluate(_ pcommon.TraceID, trace *TraceData) (Decision, error) { r.logger.Debug("Evaluating spans in rate-limiting filter") currSecond := time.Now().Unix() if r.currentSecond != currSecond { diff --git a/processor/tailsamplingprocessor/internal/sampling/rate_limiting_test.go b/processor/tailsamplingprocessor/internal/sampling/rate_limiting_test.go index a39575d39ce1..19c0bfbf61c9 100644 --- a/processor/tailsamplingprocessor/internal/sampling/rate_limiting_test.go +++ b/processor/tailsamplingprocessor/internal/sampling/rate_limiting_test.go @@ -18,13 +18,13 @@ import ( "testing" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" "go.uber.org/zap" ) func TestRateLimiter(t *testing.T) { - trace := newTraceStringAttrs(pdata.NewMap(), "example", "value") - traceID := pdata.NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}) + trace := newTraceStringAttrs(pcommon.NewMap(), "example", "value") + traceID := pcommon.NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}) rateLimiter := NewRateLimiting(zap.NewNop(), 3) // Trace span count greater than spans per second diff --git a/processor/tailsamplingprocessor/internal/sampling/status_code.go b/processor/tailsamplingprocessor/internal/sampling/status_code.go index be003b6b093a..7ac0dfc34dfc 100644 --- a/processor/tailsamplingprocessor/internal/sampling/status_code.go +++ b/processor/tailsamplingprocessor/internal/sampling/status_code.go @@ -18,13 +18,14 @@ import ( "errors" "fmt" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap" ) type statusCodeFilter struct { logger *zap.Logger - statusCodes []pdata.StatusCode + statusCodes []ptrace.StatusCode } var _ PolicyEvaluator = (*statusCodeFilter)(nil) @@ -36,16 +37,16 @@ func NewStatusCodeFilter(logger *zap.Logger, statusCodeString []string) (PolicyE return nil, errors.New("expected at least one status code to filter on") } - statusCodes := make([]pdata.StatusCode, len(statusCodeString)) + statusCodes := make([]ptrace.StatusCode, len(statusCodeString)) for i := range statusCodeString { switch statusCodeString[i] { case "OK": - statusCodes[i] = pdata.StatusCodeOk + statusCodes[i] = ptrace.StatusCodeOk case "ERROR": - statusCodes[i] = pdata.StatusCodeError + statusCodes[i] = ptrace.StatusCodeError case "UNSET": - statusCodes[i] = pdata.StatusCodeUnset + statusCodes[i] = ptrace.StatusCodeUnset default: return nil, fmt.Errorf("unknown status code %q, supported: OK, ERROR, UNSET", statusCodeString[i]) } @@ -58,14 +59,14 @@ func NewStatusCodeFilter(logger *zap.Logger, statusCodeString []string) (PolicyE } // Evaluate looks at the trace data and returns a corresponding SamplingDecision. 
-func (r *statusCodeFilter) Evaluate(_ pdata.TraceID, trace *TraceData) (Decision, error) { +func (r *statusCodeFilter) Evaluate(_ pcommon.TraceID, trace *TraceData) (Decision, error) { r.logger.Debug("Evaluating spans in status code filter") trace.Lock() batches := trace.ReceivedBatches trace.Unlock() - return hasSpanWithCondition(batches, func(span pdata.Span) bool { + return hasSpanWithCondition(batches, func(span ptrace.Span) bool { for _, statusCode := range r.statusCodes { if span.Status().Code() == statusCode { return true diff --git a/processor/tailsamplingprocessor/internal/sampling/status_code_test.go b/processor/tailsamplingprocessor/internal/sampling/status_code_test.go index 250723c7f0fd..14754bf77d13 100644 --- a/processor/tailsamplingprocessor/internal/sampling/status_code_test.go +++ b/processor/tailsamplingprocessor/internal/sampling/status_code_test.go @@ -18,7 +18,8 @@ import ( "testing" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap" ) @@ -31,55 +32,55 @@ func TestNewStatusCodeFilter_errorHandling(t *testing.T) { } func TestStatusCodeSampling(t *testing.T) { - traceID := pdata.NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}) + traceID := pcommon.NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}) cases := []struct { Desc string StatusCodesToFilterOn []string - StatusCodesPresent []pdata.StatusCode + StatusCodesPresent []ptrace.StatusCode Decision Decision }{ { Desc: "filter on ERROR - none match", StatusCodesToFilterOn: []string{"ERROR"}, - StatusCodesPresent: []pdata.StatusCode{pdata.StatusCodeOk, pdata.StatusCodeUnset, pdata.StatusCodeOk}, + StatusCodesPresent: []ptrace.StatusCode{ptrace.StatusCodeOk, ptrace.StatusCodeUnset, ptrace.StatusCodeOk}, Decision: NotSampled, }, { Desc: "filter on OK and ERROR - none match", StatusCodesToFilterOn: []string{"OK", "ERROR"}, - StatusCodesPresent: []pdata.StatusCode{pdata.StatusCodeUnset, pdata.StatusCodeUnset}, + StatusCodesPresent: []ptrace.StatusCode{ptrace.StatusCodeUnset, ptrace.StatusCodeUnset}, Decision: NotSampled, }, { Desc: "filter on UNSET - matches", StatusCodesToFilterOn: []string{"UNSET"}, - StatusCodesPresent: []pdata.StatusCode{pdata.StatusCodeUnset}, + StatusCodesPresent: []ptrace.StatusCode{ptrace.StatusCodeUnset}, Decision: Sampled, }, { Desc: "filter on OK and UNSET - matches", StatusCodesToFilterOn: []string{"OK", "UNSET"}, - StatusCodesPresent: []pdata.StatusCode{pdata.StatusCodeError, pdata.StatusCodeOk}, + StatusCodesPresent: []ptrace.StatusCode{ptrace.StatusCodeError, ptrace.StatusCodeOk}, Decision: Sampled, }, } for _, c := range cases { t.Run(c.Desc, func(t *testing.T) { - traces := pdata.NewTraces() + traces := ptrace.NewTraces() rs := traces.ResourceSpans().AppendEmpty() ils := rs.ScopeSpans().AppendEmpty() for _, statusCode := range c.StatusCodesPresent { span := ils.Spans().AppendEmpty() span.Status().SetCode(statusCode) - span.SetTraceID(pdata.NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16})) - span.SetSpanID(pdata.NewSpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8})) + span.SetTraceID(pcommon.NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16})) + span.SetSpanID(pcommon.NewSpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8})) } trace := &TraceData{ - ReceivedBatches: []pdata.Traces{traces}, + ReceivedBatches: []ptrace.Traces{traces}, } statusCodeFilter, err := 
NewStatusCodeFilter(zap.NewNop(), c.StatusCodesToFilterOn) diff --git a/processor/tailsamplingprocessor/internal/sampling/string_tag_filter.go b/processor/tailsamplingprocessor/internal/sampling/string_tag_filter.go index ee81320bfaca..7b3e85cf85ac 100644 --- a/processor/tailsamplingprocessor/internal/sampling/string_tag_filter.go +++ b/processor/tailsamplingprocessor/internal/sampling/string_tag_filter.go @@ -18,7 +18,8 @@ import ( "regexp" "github.com/golang/groupcache/lru" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap" ) @@ -99,7 +100,7 @@ func NewStringAttributeFilter(logger *zap.Logger, key string, values []string, r // Evaluate looks at the trace data and returns a corresponding SamplingDecision. // The SamplingDecision is made by comparing the attribute values with the matching values, // which might be static strings or regular expressions. -func (saf *stringAttributeFilter) Evaluate(_ pdata.TraceID, trace *TraceData) (Decision, error) { +func (saf *stringAttributeFilter) Evaluate(_ pcommon.TraceID, trace *TraceData) (Decision, error) { saf.logger.Debug("Evaluting spans in string-tag filter") trace.Lock() batches := trace.ReceivedBatches @@ -109,7 +110,7 @@ func (saf *stringAttributeFilter) Evaluate(_ pdata.TraceID, trace *TraceData) (D // Invert Match returns true by default, except when key and value are matched return invertHasResourceOrSpanWithCondition( batches, - func(resource pdata.Resource) bool { + func(resource pcommon.Resource) bool { if v, ok := resource.Attributes().Get(saf.key); ok { if ok := saf.matcher(v.StringVal()); ok { return false @@ -117,7 +118,7 @@ func (saf *stringAttributeFilter) Evaluate(_ pdata.TraceID, trace *TraceData) (D } return true }, - func(span pdata.Span) bool { + func(span ptrace.Span) bool { if v, ok := span.Attributes().Get(saf.key); ok { truncableStr := v.StringVal() if len(truncableStr) > 0 { @@ -133,7 +134,7 @@ func (saf *stringAttributeFilter) Evaluate(_ pdata.TraceID, trace *TraceData) (D return hasResourceOrSpanWithCondition( batches, - func(resource pdata.Resource) bool { + func(resource pcommon.Resource) bool { if v, ok := resource.Attributes().Get(saf.key); ok { if ok := saf.matcher(v.StringVal()); ok { return true @@ -141,7 +142,7 @@ func (saf *stringAttributeFilter) Evaluate(_ pdata.TraceID, trace *TraceData) (D } return false }, - func(span pdata.Span) bool { + func(span ptrace.Span) bool { if v, ok := span.Attributes().Get(saf.key); ok { truncableStr := v.StringVal() if len(truncableStr) > 0 { diff --git a/processor/tailsamplingprocessor/internal/sampling/string_tag_filter_test.go b/processor/tailsamplingprocessor/internal/sampling/string_tag_filter_test.go index d784c722e607..1afdce2c2d40 100644 --- a/processor/tailsamplingprocessor/internal/sampling/string_tag_filter_test.go +++ b/processor/tailsamplingprocessor/internal/sampling/string_tag_filter_test.go @@ -18,7 +18,8 @@ import ( "testing" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap" ) @@ -41,169 +42,169 @@ func TestStringTagFilter(t *testing.T) { }{ { Desc: "nonmatching node attribute key", - Trace: newTraceStringAttrs(pdata.NewMapFromRaw(map[string]interface{}{"non_matching": "value"}), "", ""), + Trace: newTraceStringAttrs(pcommon.NewMapFromRaw(map[string]interface{}{"non_matching": "value"}), "", 
""), filterCfg: &TestStringAttributeCfg{Key: "example", Values: []string{"value"}, EnabledRegexMatching: false, CacheMaxSize: defaultCacheSize}, Decision: NotSampled, }, { Desc: "nonmatching node attribute value", - Trace: newTraceStringAttrs(pdata.NewMapFromRaw(map[string]interface{}{"example": "non_matching"}), "", ""), + Trace: newTraceStringAttrs(pcommon.NewMapFromRaw(map[string]interface{}{"example": "non_matching"}), "", ""), filterCfg: &TestStringAttributeCfg{Key: "example", Values: []string{"value"}, EnabledRegexMatching: false, CacheMaxSize: defaultCacheSize}, Decision: NotSampled, }, { Desc: "matching node attribute", - Trace: newTraceStringAttrs(pdata.NewMapFromRaw(map[string]interface{}{"example": "value"}), "", ""), + Trace: newTraceStringAttrs(pcommon.NewMapFromRaw(map[string]interface{}{"example": "value"}), "", ""), filterCfg: &TestStringAttributeCfg{Key: "example", Values: []string{"value"}, EnabledRegexMatching: false, CacheMaxSize: defaultCacheSize}, Decision: Sampled, }, { Desc: "nonmatching span attribute key", - Trace: newTraceStringAttrs(pdata.NewMap(), "nonmatching", "value"), + Trace: newTraceStringAttrs(pcommon.NewMap(), "nonmatching", "value"), filterCfg: &TestStringAttributeCfg{Key: "example", Values: []string{"value"}, EnabledRegexMatching: false, CacheMaxSize: defaultCacheSize}, Decision: NotSampled, }, { Desc: "nonmatching span attribute value", - Trace: newTraceStringAttrs(pdata.NewMap(), "example", "nonmatching"), + Trace: newTraceStringAttrs(pcommon.NewMap(), "example", "nonmatching"), filterCfg: &TestStringAttributeCfg{Key: "example", Values: []string{"value"}, EnabledRegexMatching: false, CacheMaxSize: defaultCacheSize}, Decision: NotSampled, }, { Desc: "matching span attribute", - Trace: newTraceStringAttrs(pdata.NewMap(), "example", "value"), + Trace: newTraceStringAttrs(pcommon.NewMap(), "example", "value"), filterCfg: &TestStringAttributeCfg{Key: "example", Values: []string{"value"}, EnabledRegexMatching: false, CacheMaxSize: defaultCacheSize}, Decision: Sampled, }, { Desc: "matching span attribute with regex", - Trace: newTraceStringAttrs(pdata.NewMap(), "example", "grpc.health.v1.HealthCheck"), + Trace: newTraceStringAttrs(pcommon.NewMap(), "example", "grpc.health.v1.HealthCheck"), filterCfg: &TestStringAttributeCfg{Key: "example", Values: []string{"v[0-9]+.HealthCheck$"}, EnabledRegexMatching: true, CacheMaxSize: defaultCacheSize}, Decision: Sampled, }, { Desc: "nonmatching span attribute with regex", - Trace: newTraceStringAttrs(pdata.NewMap(), "example", "grpc.health.v1.HealthCheck"), + Trace: newTraceStringAttrs(pcommon.NewMap(), "example", "grpc.health.v1.HealthCheck"), filterCfg: &TestStringAttributeCfg{Key: "example", Values: []string{"v[a-z]+.HealthCheck$"}, EnabledRegexMatching: true, CacheMaxSize: defaultCacheSize}, Decision: NotSampled, }, { Desc: "matching span attribute with regex without CacheSize provided in config", - Trace: newTraceStringAttrs(pdata.NewMap(), "example", "grpc.health.v1.HealthCheck"), + Trace: newTraceStringAttrs(pcommon.NewMap(), "example", "grpc.health.v1.HealthCheck"), filterCfg: &TestStringAttributeCfg{Key: "example", Values: []string{"v[0-9]+.HealthCheck$"}, EnabledRegexMatching: true}, Decision: Sampled, }, { Desc: "matching plain text node attribute in regex", - Trace: newTraceStringAttrs(pdata.NewMapFromRaw(map[string]interface{}{"example": "value"}), "", ""), + Trace: newTraceStringAttrs(pcommon.NewMapFromRaw(map[string]interface{}{"example": "value"}), "", ""), filterCfg: &TestStringAttributeCfg{Key: 
"example", Values: []string{"value"}, EnabledRegexMatching: true, CacheMaxSize: defaultCacheSize}, Decision: Sampled, }, { Desc: "nonmatching span attribute on empty filter list", - Trace: newTraceStringAttrs(pdata.NewMap(), "example", "grpc.health.v1.HealthCheck"), + Trace: newTraceStringAttrs(pcommon.NewMap(), "example", "grpc.health.v1.HealthCheck"), filterCfg: &TestStringAttributeCfg{Key: "example", Values: []string{}, EnabledRegexMatching: true}, Decision: NotSampled, }, { Desc: "invert nonmatching node attribute key", - Trace: newTraceStringAttrs(pdata.NewMapFromRaw(map[string]interface{}{"non_matching": "value"}), "", ""), + Trace: newTraceStringAttrs(pcommon.NewMapFromRaw(map[string]interface{}{"non_matching": "value"}), "", ""), filterCfg: &TestStringAttributeCfg{Key: "example", Values: []string{"value"}, EnabledRegexMatching: false, CacheMaxSize: defaultCacheSize, InvertMatch: true}, Decision: InvertSampled, }, { Desc: "invert nonmatching node attribute value", - Trace: newTraceStringAttrs(pdata.NewMapFromRaw(map[string]interface{}{"example": "non_matching"}), "", ""), + Trace: newTraceStringAttrs(pcommon.NewMapFromRaw(map[string]interface{}{"example": "non_matching"}), "", ""), filterCfg: &TestStringAttributeCfg{Key: "example", Values: []string{"value"}, EnabledRegexMatching: false, CacheMaxSize: defaultCacheSize, InvertMatch: true}, Decision: InvertSampled, }, { Desc: "invert nonmatching node attribute list", - Trace: newTraceStringAttrs(pdata.NewMapFromRaw(map[string]interface{}{"example": "non_matching"}), "", ""), + Trace: newTraceStringAttrs(pcommon.NewMapFromRaw(map[string]interface{}{"example": "non_matching"}), "", ""), filterCfg: &TestStringAttributeCfg{Key: "example", Values: []string{"first_value", "value", "last_value"}, EnabledRegexMatching: false, CacheMaxSize: defaultCacheSize, InvertMatch: true}, Decision: InvertSampled, }, { Desc: "invert matching node attribute", - Trace: newTraceStringAttrs(pdata.NewMapFromRaw(map[string]interface{}{"example": "value"}), "", ""), + Trace: newTraceStringAttrs(pcommon.NewMapFromRaw(map[string]interface{}{"example": "value"}), "", ""), filterCfg: &TestStringAttributeCfg{Key: "example", Values: []string{"value"}, EnabledRegexMatching: false, CacheMaxSize: defaultCacheSize, InvertMatch: true}, Decision: InvertNotSampled, }, { Desc: "invert matching node attribute list", - Trace: newTraceStringAttrs(pdata.NewMapFromRaw(map[string]interface{}{"example": "value"}), "", ""), + Trace: newTraceStringAttrs(pcommon.NewMapFromRaw(map[string]interface{}{"example": "value"}), "", ""), filterCfg: &TestStringAttributeCfg{Key: "example", Values: []string{"first_value", "value", "last_value"}, EnabledRegexMatching: false, CacheMaxSize: defaultCacheSize, InvertMatch: true}, Decision: InvertNotSampled, }, { Desc: "invert nonmatching span attribute key", - Trace: newTraceStringAttrs(pdata.NewMap(), "nonmatching", "value"), + Trace: newTraceStringAttrs(pcommon.NewMap(), "nonmatching", "value"), filterCfg: &TestStringAttributeCfg{Key: "example", Values: []string{"value"}, EnabledRegexMatching: false, CacheMaxSize: defaultCacheSize, InvertMatch: true}, Decision: InvertSampled, }, { Desc: "invert nonmatching span attribute value", - Trace: newTraceStringAttrs(pdata.NewMap(), "example", "nonmatching"), + Trace: newTraceStringAttrs(pcommon.NewMap(), "example", "nonmatching"), filterCfg: &TestStringAttributeCfg{Key: "example", Values: []string{"value"}, EnabledRegexMatching: false, CacheMaxSize: defaultCacheSize, InvertMatch: true}, Decision: 
InvertSampled, }, { Desc: "invert nonmatching span attribute list", - Trace: newTraceStringAttrs(pdata.NewMap(), "example", "nonmatching"), + Trace: newTraceStringAttrs(pcommon.NewMap(), "example", "nonmatching"), filterCfg: &TestStringAttributeCfg{Key: "example", Values: []string{"first_value", "value", "last_value"}, EnabledRegexMatching: false, CacheMaxSize: defaultCacheSize, InvertMatch: true}, Decision: InvertSampled, }, { Desc: "invert matching span attribute", - Trace: newTraceStringAttrs(pdata.NewMap(), "example", "value"), + Trace: newTraceStringAttrs(pcommon.NewMap(), "example", "value"), filterCfg: &TestStringAttributeCfg{Key: "example", Values: []string{"value"}, EnabledRegexMatching: false, CacheMaxSize: defaultCacheSize, InvertMatch: true}, Decision: InvertNotSampled, }, { Desc: "invert matching span attribute list", - Trace: newTraceStringAttrs(pdata.NewMap(), "example", "value"), + Trace: newTraceStringAttrs(pcommon.NewMap(), "example", "value"), filterCfg: &TestStringAttributeCfg{Key: "example", Values: []string{"first_value", "value", "last_value"}, EnabledRegexMatching: false, CacheMaxSize: defaultCacheSize, InvertMatch: true}, Decision: InvertNotSampled, }, { Desc: "invert matching span attribute with regex", - Trace: newTraceStringAttrs(pdata.NewMap(), "example", "grpc.health.v1.HealthCheck"), + Trace: newTraceStringAttrs(pcommon.NewMap(), "example", "grpc.health.v1.HealthCheck"), filterCfg: &TestStringAttributeCfg{Key: "example", Values: []string{"v[0-9]+.HealthCheck$"}, EnabledRegexMatching: true, CacheMaxSize: defaultCacheSize, InvertMatch: true}, Decision: InvertNotSampled, }, { Desc: "invert matching span attribute with regex list", - Trace: newTraceStringAttrs(pdata.NewMap(), "example", "grpc.health.v1.HealthCheck"), + Trace: newTraceStringAttrs(pcommon.NewMap(), "example", "grpc.health.v1.HealthCheck"), filterCfg: &TestStringAttributeCfg{Key: "example", Values: []string{"^http", "v[0-9]+.HealthCheck$", "metrics$"}, EnabledRegexMatching: true, CacheMaxSize: defaultCacheSize, InvertMatch: true}, Decision: InvertNotSampled, }, { Desc: "invert nonmatching span attribute with regex", - Trace: newTraceStringAttrs(pdata.NewMap(), "example", "grpc.health.v1.HealthCheck"), + Trace: newTraceStringAttrs(pcommon.NewMap(), "example", "grpc.health.v1.HealthCheck"), filterCfg: &TestStringAttributeCfg{Key: "example", Values: []string{"v[a-z]+.HealthCheck$"}, EnabledRegexMatching: true, CacheMaxSize: defaultCacheSize, InvertMatch: true}, Decision: InvertSampled, }, { Desc: "invert nonmatching span attribute with regex list", - Trace: newTraceStringAttrs(pdata.NewMap(), "example", "grpc.health.v1.HealthCheck"), + Trace: newTraceStringAttrs(pcommon.NewMap(), "example", "grpc.health.v1.HealthCheck"), filterCfg: &TestStringAttributeCfg{Key: "example", Values: []string{"^http", "v[a-z]+.HealthCheck$", "metrics$"}, EnabledRegexMatching: true, CacheMaxSize: defaultCacheSize, InvertMatch: true}, Decision: InvertSampled, }, { Desc: "invert matching plain text node attribute in regex", - Trace: newTraceStringAttrs(pdata.NewMapFromRaw(map[string]interface{}{"example": "value"}), "", ""), + Trace: newTraceStringAttrs(pcommon.NewMapFromRaw(map[string]interface{}{"example": "value"}), "", ""), filterCfg: &TestStringAttributeCfg{Key: "example", Values: []string{"value"}, EnabledRegexMatching: true, CacheMaxSize: defaultCacheSize, InvertMatch: true}, Decision: InvertNotSampled, }, { Desc: "invert matching plain text node attribute in regex list", - Trace: 
newTraceStringAttrs(pdata.NewMapFromRaw(map[string]interface{}{"example": "value"}), "", ""), + Trace: newTraceStringAttrs(pcommon.NewMapFromRaw(map[string]interface{}{"example": "value"}), "", ""), filterCfg: &TestStringAttributeCfg{Key: "example", Values: []string{"first_value", "value", "last_value"}, EnabledRegexMatching: true, CacheMaxSize: defaultCacheSize, InvertMatch: true}, Decision: InvertNotSampled, }, { Desc: "invert nonmatching span attribute on empty filter list", - Trace: newTraceStringAttrs(pdata.NewMap(), "example", "grpc.health.v1.HealthCheck"), + Trace: newTraceStringAttrs(pcommon.NewMap(), "example", "grpc.health.v1.HealthCheck"), filterCfg: &TestStringAttributeCfg{Key: "example", Values: []string{}, EnabledRegexMatching: true, InvertMatch: true}, Decision: InvertSampled, }, @@ -212,7 +213,7 @@ func TestStringTagFilter(t *testing.T) { for _, c := range cases { t.Run(c.Desc, func(t *testing.T) { filter := NewStringAttributeFilter(zap.NewNop(), c.filterCfg.Key, c.filterCfg.Values, c.filterCfg.EnabledRegexMatching, c.filterCfg.CacheMaxSize, c.filterCfg.InvertMatch) - decision, err := filter.Evaluate(pdata.NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}), c.Trace) + decision, err := filter.Evaluate(pcommon.NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}), c.Trace) assert.NoError(t, err) assert.Equal(t, decision, c.Decision) }) @@ -220,32 +221,32 @@ func TestStringTagFilter(t *testing.T) { } func BenchmarkStringTagFilterEvaluatePlainText(b *testing.B) { - trace := newTraceStringAttrs(pdata.NewMapFromRaw(map[string]interface{}{"example": "value"}), "", "") + trace := newTraceStringAttrs(pcommon.NewMapFromRaw(map[string]interface{}{"example": "value"}), "", "") filter := NewStringAttributeFilter(zap.NewNop(), "example", []string{"value"}, false, 0, false) b.ResetTimer() for i := 0; i < b.N; i++ { - filter.Evaluate(pdata.NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}), trace) + filter.Evaluate(pcommon.NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}), trace) } } func BenchmarkStringTagFilterEvaluateRegex(b *testing.B) { - trace := newTraceStringAttrs(pdata.NewMapFromRaw(map[string]interface{}{"example": "grpc.health.v1.HealthCheck"}), "", "") + trace := newTraceStringAttrs(pcommon.NewMapFromRaw(map[string]interface{}{"example": "grpc.health.v1.HealthCheck"}), "", "") filter := NewStringAttributeFilter(zap.NewNop(), "example", []string{"v[0-9]+.HealthCheck$"}, true, 0, false) b.ResetTimer() for i := 0; i < b.N; i++ { - filter.Evaluate(pdata.NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}), trace) + filter.Evaluate(pcommon.NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}), trace) } } -func newTraceStringAttrs(nodeAttrs pdata.Map, spanAttrKey string, spanAttrValue string) *TraceData { - var traceBatches []pdata.Traces - traces := pdata.NewTraces() +func newTraceStringAttrs(nodeAttrs pcommon.Map, spanAttrKey string, spanAttrValue string) *TraceData { + var traceBatches []ptrace.Traces + traces := ptrace.NewTraces() rs := traces.ResourceSpans().AppendEmpty() nodeAttrs.CopyTo(rs.Resource().Attributes()) ils := rs.ScopeSpans().AppendEmpty() span := ils.Spans().AppendEmpty() - span.SetTraceID(pdata.NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16})) - span.SetSpanID(pdata.NewSpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8})) + span.SetTraceID(pcommon.NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 
14, 15, 16})) + span.SetSpanID(pcommon.NewSpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8})) span.Attributes().InsertString(spanAttrKey, spanAttrValue) traceBatches = append(traceBatches, traces) return &TraceData{ diff --git a/processor/tailsamplingprocessor/internal/sampling/util.go b/processor/tailsamplingprocessor/internal/sampling/util.go index be7a98a1694d..a72a7bfe0a26 100644 --- a/processor/tailsamplingprocessor/internal/sampling/util.go +++ b/processor/tailsamplingprocessor/internal/sampling/util.go @@ -14,14 +14,17 @@ package sampling // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor/internal/sampling" -import "go.opentelemetry.io/collector/model/pdata" +import ( + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" +) // hasResourceOrSpanWithCondition iterates through all the resources and instrumentation library spans until any // callback returns true. func hasResourceOrSpanWithCondition( - batches []pdata.Traces, - shouldSampleResource func(resource pdata.Resource) bool, - shouldSampleSpan func(span pdata.Span) bool, + batches []ptrace.Traces, + shouldSampleResource func(resource pcommon.Resource) bool, + shouldSampleSpan func(span ptrace.Span) bool, ) Decision { for _, batch := range batches { rspans := batch.ResourceSpans() @@ -45,9 +48,9 @@ func hasResourceOrSpanWithCondition( // invertHasResourceOrSpanWithCondition iterates through all the resources and instrumentation library spans until any // callback returns false. func invertHasResourceOrSpanWithCondition( - batches []pdata.Traces, - shouldSampleResource func(resource pdata.Resource) bool, - shouldSampleSpan func(span pdata.Span) bool, + batches []ptrace.Traces, + shouldSampleResource func(resource pcommon.Resource) bool, + shouldSampleSpan func(span ptrace.Span) bool, ) Decision { for _, batch := range batches { rspans := batch.ResourceSpans() @@ -69,7 +72,7 @@ func invertHasResourceOrSpanWithCondition( } // hasSpanWithCondition iterates through all the instrumentation library spans until any callback returns true. 
-func hasSpanWithCondition(batches []pdata.Traces, shouldSample func(span pdata.Span) bool) Decision { +func hasSpanWithCondition(batches []ptrace.Traces, shouldSample func(span ptrace.Span) bool) Decision { for _, batch := range batches { rspans := batch.ResourceSpans() @@ -84,7 +87,7 @@ func hasSpanWithCondition(batches []pdata.Traces, shouldSample func(span pdata.S return NotSampled } -func hasInstrumentationLibrarySpanWithCondition(ilss pdata.ScopeSpansSlice, check func(span pdata.Span) bool) bool { +func hasInstrumentationLibrarySpanWithCondition(ilss ptrace.ScopeSpansSlice, check func(span ptrace.Span) bool) bool { for i := 0; i < ilss.Len(); i++ { ils := ilss.At(i) @@ -99,7 +102,7 @@ func hasInstrumentationLibrarySpanWithCondition(ilss pdata.ScopeSpansSlice, chec return false } -func invertHasInstrumentationLibrarySpanWithCondition(ilss pdata.ScopeSpansSlice, check func(span pdata.Span) bool) bool { +func invertHasInstrumentationLibrarySpanWithCondition(ilss ptrace.ScopeSpansSlice, check func(span ptrace.Span) bool) bool { for i := 0; i < ilss.Len(); i++ { ils := ilss.At(i) diff --git a/processor/tailsamplingprocessor/processor.go b/processor/tailsamplingprocessor/processor.go index 5785872aceb3..0eba1102ff69 100644 --- a/processor/tailsamplingprocessor/processor.go +++ b/processor/tailsamplingprocessor/processor.go @@ -27,7 +27,8 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/component/componenterror" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/timeutils" @@ -58,7 +59,7 @@ type tailSamplingSpanProcessor struct { policyTicker timeutils.TTicker tickerFrequency time.Duration decisionBatcher idbatcher.Batcher - deleteChan chan pdata.TraceID + deleteChan chan pcommon.TraceID numTracesOnMap uint64 } @@ -110,7 +111,7 @@ func newTracesProcessor(logger *zap.Logger, nextConsumer consumer.Traces, cfg Co } tsp.policyTicker = &timeutils.PolicyTicker{OnTickFunc: tsp.samplingPolicyOnTick} - tsp.deleteChan = make(chan pdata.TraceID, cfg.NumTraces) + tsp.deleteChan = make(chan pcommon.TraceID, cfg.NumTraces) return tsp, nil } @@ -180,7 +181,7 @@ func (tsp *tailSamplingSpanProcessor) samplingPolicyOnTick() { // Combine all individual batches into a single batch so // consumers may operate on the entire trace - allSpans := pdata.NewTraces() + allSpans := ptrace.NewTraces() for j := 0; j < len(traceBatches); j++ { batch := traceBatches[j] batch.ResourceSpans().MoveAndAppendTo(allSpans.ResourceSpans()) @@ -205,7 +206,7 @@ func (tsp *tailSamplingSpanProcessor) samplingPolicyOnTick() { ) } -func (tsp *tailSamplingSpanProcessor) makeDecision(id pdata.TraceID, trace *sampling.TraceData, metrics *policyMetrics) (sampling.Decision, *policy) { +func (tsp *tailSamplingSpanProcessor) makeDecision(id pcommon.TraceID, trace *sampling.TraceData, metrics *policyMetrics) (sampling.Decision, *policy) { finalDecision := sampling.NotSampled var matchingPolicy *policy samplingDecision := map[sampling.Decision]bool{ @@ -289,7 +290,7 @@ func (tsp *tailSamplingSpanProcessor) makeDecision(id pdata.TraceID, trace *samp } // ConsumeTraceData is required by the SpanProcessor interface. 
-func (tsp *tailSamplingSpanProcessor) ConsumeTraces(ctx context.Context, td pdata.Traces) error { +func (tsp *tailSamplingSpanProcessor) ConsumeTraces(ctx context.Context, td ptrace.Traces) error { resourceSpans := td.ResourceSpans() for i := 0; i < resourceSpans.Len(); i++ { tsp.processTraces(resourceSpans.At(i)) @@ -297,8 +298,8 @@ func (tsp *tailSamplingSpanProcessor) ConsumeTraces(ctx context.Context, td pdat return nil } -func (tsp *tailSamplingSpanProcessor) groupSpansByTraceKey(resourceSpans pdata.ResourceSpans) map[pdata.TraceID][]*pdata.Span { - idToSpans := make(map[pdata.TraceID][]*pdata.Span) +func (tsp *tailSamplingSpanProcessor) groupSpansByTraceKey(resourceSpans ptrace.ResourceSpans) map[pcommon.TraceID][]*ptrace.Span { + idToSpans := make(map[pcommon.TraceID][]*ptrace.Span) ilss := resourceSpans.ScopeSpans() for j := 0; j < ilss.Len(); j++ { spans := ilss.At(j).Spans() @@ -312,7 +313,7 @@ func (tsp *tailSamplingSpanProcessor) groupSpansByTraceKey(resourceSpans pdata.R return idToSpans } -func (tsp *tailSamplingSpanProcessor) processTraces(resourceSpans pdata.ResourceSpans) { +func (tsp *tailSamplingSpanProcessor) processTraces(resourceSpans ptrace.ResourceSpans) { // Group spans per their traceId to minimize contention on idToTrace idToSpans := tsp.groupSpansByTraceKey(resourceSpans) var newTraceIDs int64 @@ -351,7 +352,7 @@ func (tsp *tailSamplingSpanProcessor) processTraces(resourceSpans pdata.Resource } for i, p := range tsp.policies { - var traceTd pdata.Traces + var traceTd ptrace.Traces actualData.Lock() actualDecision := actualData.Decisions[i] // If decision is pending, we want to add the new spans still under the lock, so the decision doesn't happen @@ -412,7 +413,7 @@ func (tsp *tailSamplingSpanProcessor) Shutdown(context.Context) error { return nil } -func (tsp *tailSamplingSpanProcessor) dropTrace(traceID pdata.TraceID, deletionTime time.Time) { +func (tsp *tailSamplingSpanProcessor) dropTrace(traceID pcommon.TraceID, deletionTime time.Time) { var trace *sampling.TraceData if d, ok := tsp.idToTrace.Load(traceID); ok { trace = d.(*sampling.TraceData) @@ -428,8 +429,8 @@ func (tsp *tailSamplingSpanProcessor) dropTrace(traceID pdata.TraceID, deletionT stats.Record(tsp.ctx, statTraceRemovalAgeSec.M(int64(deletionTime.Sub(trace.ArrivalTime)/time.Second))) } -func prepareTraceBatch(rss pdata.ResourceSpans, spans []*pdata.Span) pdata.Traces { - traceTd := pdata.NewTraces() +func prepareTraceBatch(rss ptrace.ResourceSpans, spans []*ptrace.Span) ptrace.Traces { + traceTd := ptrace.NewTraces() rs := traceTd.ResourceSpans().AppendEmpty() rss.Resource().CopyTo(rs.Resource()) ils := rs.ScopeSpans().AppendEmpty() diff --git a/processor/tailsamplingprocessor/processor_test.go b/processor/tailsamplingprocessor/processor_test.go index 48a47c375cc9..7a1ab90a1a69 100644 --- a/processor/tailsamplingprocessor/processor_test.go +++ b/processor/tailsamplingprocessor/processor_test.go @@ -26,7 +26,8 @@ import ( "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/consumer/consumertest" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/timeutils" @@ -89,11 +90,11 @@ func TestConcurrentTraceArrival(t *testing.T) { for _, batch := range batches { // Add the same traceId twice. 
wg.Add(2) - go func(td pdata.Traces) { + go func(td ptrace.Traces) { tsp.ConsumeTraces(context.Background(), td) wg.Done() }(batch) - go func(td pdata.Traces) { + go func(td ptrace.Traces) { tsp.ConsumeTraces(context.Background(), td) wg.Done() }(batch) @@ -157,7 +158,7 @@ func TestConcurrentTraceMapSize(t *testing.T) { for _, batch := range batches { wg.Add(1) - go func(td pdata.Traces) { + go func(td ptrace.Traces) { tsp.ConsumeTraces(context.Background(), td) wg.Done() }(batch) @@ -190,7 +191,7 @@ func TestSamplingPolicyTypicalPath(t *testing.T) { logger: zap.NewNop(), decisionBatcher: newSyncIDBatcher(decisionWaitSeconds), policies: []*policy{{name: "mock-policy", evaluator: mpe, ctx: context.TODO()}}, - deleteChan: make(chan pdata.TraceID, maxSize), + deleteChan: make(chan pcommon.TraceID, maxSize), policyTicker: mtt, tickerFrequency: 100 * time.Millisecond, } @@ -250,7 +251,7 @@ func TestSamplingPolicyInvertSampled(t *testing.T) { logger: zap.NewNop(), decisionBatcher: newSyncIDBatcher(decisionWaitSeconds), policies: []*policy{{name: "mock-policy", evaluator: mpe, ctx: context.TODO()}}, - deleteChan: make(chan pdata.TraceID, maxSize), + deleteChan: make(chan pcommon.TraceID, maxSize), policyTicker: mtt, tickerFrequency: 100 * time.Millisecond, } @@ -317,7 +318,7 @@ func TestSamplingMultiplePolicies(t *testing.T) { { name: "policy-2", evaluator: mpe2, ctx: context.TODO(), }}, - deleteChan: make(chan pdata.TraceID, maxSize), + deleteChan: make(chan pcommon.TraceID, maxSize), policyTicker: mtt, tickerFrequency: 100 * time.Millisecond, } @@ -379,7 +380,7 @@ func TestSamplingPolicyDecisionNotSampled(t *testing.T) { logger: zap.NewNop(), decisionBatcher: newSyncIDBatcher(decisionWaitSeconds), policies: []*policy{{name: "mock-policy", evaluator: mpe, ctx: context.TODO()}}, - deleteChan: make(chan pdata.TraceID, maxSize), + deleteChan: make(chan pcommon.TraceID, maxSize), policyTicker: mtt, tickerFrequency: 100 * time.Millisecond, } @@ -441,7 +442,7 @@ func TestSamplingPolicyDecisionInvertNotSampled(t *testing.T) { logger: zap.NewNop(), decisionBatcher: newSyncIDBatcher(decisionWaitSeconds), policies: []*policy{{name: "mock-policy", evaluator: mpe, ctx: context.TODO()}}, - deleteChan: make(chan pdata.TraceID, maxSize), + deleteChan: make(chan pcommon.TraceID, maxSize), policyTicker: mtt, tickerFrequency: 100 * time.Millisecond, } @@ -503,7 +504,7 @@ func TestMultipleBatchesAreCombinedIntoOne(t *testing.T) { logger: zap.NewNop(), decisionBatcher: newSyncIDBatcher(decisionWaitSeconds), policies: []*policy{{name: "mock-policy", evaluator: mpe, ctx: context.TODO()}}, - deleteChan: make(chan pdata.TraceID, maxSize), + deleteChan: make(chan pcommon.TraceID, maxSize), policyTicker: mtt, tickerFrequency: 100 * time.Millisecond, } @@ -524,15 +525,15 @@ func TestMultipleBatchesAreCombinedIntoOne(t *testing.T) { require.EqualValues(t, 3, len(msp.AllTraces()), "There should be three batches, one for each trace") - expectedSpanIds := make(map[int][]pdata.SpanID) - expectedSpanIds[0] = []pdata.SpanID{ + expectedSpanIds := make(map[int][]pcommon.SpanID) + expectedSpanIds[0] = []pcommon.SpanID{ uInt64ToSpanID(uint64(1)), } - expectedSpanIds[1] = []pdata.SpanID{ + expectedSpanIds[1] = []pcommon.SpanID{ uInt64ToSpanID(uint64(2)), uInt64ToSpanID(uint64(3)), } - expectedSpanIds[2] = []pdata.SpanID{ + expectedSpanIds[2] = []pcommon.SpanID{ uInt64ToSpanID(uint64(4)), uInt64ToSpanID(uint64(5)), uInt64ToSpanID(uint64(6)), @@ -560,8 +561,8 @@ func TestMultipleBatchesAreCombinedIntoOne(t *testing.T) { } } -func 
collectSpanIds(trace *pdata.Traces) []pdata.SpanID { - spanIDs := make([]pdata.SpanID, 0) +func collectSpanIds(trace *ptrace.Traces) []pcommon.SpanID { + spanIDs := make([]pcommon.SpanID, 0) for i := 0; i < trace.ResourceSpans().Len(); i++ { ilss := trace.ResourceSpans().At(i).ScopeSpans() @@ -579,7 +580,7 @@ func collectSpanIds(trace *pdata.Traces) []pdata.SpanID { return spanIDs } -func findTrace(a []pdata.Traces, traceID pdata.TraceID) *pdata.Traces { +func findTrace(a []ptrace.Traces, traceID pcommon.TraceID) *ptrace.Traces { for _, batch := range a { id := batch.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).TraceID() if traceID.Bytes() == id.Bytes() { @@ -589,15 +590,15 @@ func findTrace(a []pdata.Traces, traceID pdata.TraceID) *pdata.Traces { return nil } -func generateIdsAndBatches(numIds int) ([]pdata.TraceID, []pdata.Traces) { - traceIds := make([]pdata.TraceID, numIds) +func generateIdsAndBatches(numIds int) ([]pcommon.TraceID, []ptrace.Traces) { + traceIds := make([]pcommon.TraceID, numIds) spanID := 0 - var tds []pdata.Traces + var tds []ptrace.Traces for i := 0; i < numIds; i++ { traceID := [16]byte{} binary.BigEndian.PutUint64(traceID[:8], 1) binary.BigEndian.PutUint64(traceID[8:], uint64(i+1)) - traceIds[i] = pdata.NewTraceID(traceID) + traceIds[i] = pcommon.NewTraceID(traceID) // Send each span in a separate batch for j := 0; j <= i; j++ { td := simpleTraces() @@ -613,11 +614,11 @@ func generateIdsAndBatches(numIds int) ([]pdata.TraceID, []pdata.Traces) { return traceIds, tds } -// uInt64ToSpanID converts the uint64 representation of a SpanID to pdata.SpanID. -func uInt64ToSpanID(id uint64) pdata.SpanID { +// uInt64ToSpanID converts the uint64 representation of a SpanID to pcommon.SpanID. +func uInt64ToSpanID(id uint64) pcommon.SpanID { spanID := [8]byte{} binary.BigEndian.PutUint64(spanID[:], id) - return pdata.NewSpanID(spanID) + return pcommon.NewSpanID(spanID) } type mockPolicyEvaluator struct { @@ -628,7 +629,7 @@ type mockPolicyEvaluator struct { var _ sampling.PolicyEvaluator = (*mockPolicyEvaluator)(nil) -func (m *mockPolicyEvaluator) Evaluate(pdata.TraceID, *sampling.TraceData) (sampling.Decision, error) { +func (m *mockPolicyEvaluator) Evaluate(pcommon.TraceID, *sampling.TraceData) (sampling.Decision, error) { m.EvaluationCount++ return m.NextDecision, m.NextError } @@ -667,7 +668,7 @@ func newSyncIDBatcher(numBatches uint64) idbatcher.Batcher { } } -func (s *syncIDBatcher) AddToCurrentBatch(id pdata.TraceID) { +func (s *syncIDBatcher) AddToCurrentBatch(id pcommon.TraceID) { s.Lock() s.openBatch = append(s.openBatch, id) s.Unlock() @@ -685,12 +686,12 @@ func (s *syncIDBatcher) CloseCurrentAndTakeFirstBatch() (idbatcher.Batch, bool) func (s *syncIDBatcher) Stop() { } -func simpleTraces() pdata.Traces { - return simpleTracesWithID(pdata.NewTraceID([16]byte{1, 2, 3, 4})) +func simpleTraces() ptrace.Traces { + return simpleTracesWithID(pcommon.NewTraceID([16]byte{1, 2, 3, 4})) } -func simpleTracesWithID(traceID pdata.TraceID) pdata.Traces { - traces := pdata.NewTraces() +func simpleTracesWithID(traceID pcommon.TraceID) ptrace.Traces { + traces := ptrace.NewTraces() traces.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty().Spans().AppendEmpty().SetTraceID(traceID) return traces } diff --git a/processor/transformprocessor/factory_test.go b/processor/transformprocessor/factory_test.go index 410b8d9e991a..af4df80f6030 100644 --- a/processor/transformprocessor/factory_test.go +++ b/processor/transformprocessor/factory_test.go @@ -23,7 +23,7 @@ import ( 
"go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/config/configtest" "go.opentelemetry.io/collector/consumer/consumertest" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/ptrace" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/common" ) @@ -74,7 +74,7 @@ func TestFactoryCreateTracesProcessor(t *testing.T) { assert.NotNil(t, tp) assert.NoError(t, err) - td := pdata.NewTraces() + td := ptrace.NewTraces() span := td.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty().Spans().AppendEmpty() span.SetName("operationA") diff --git a/processor/transformprocessor/go.mod b/processor/transformprocessor/go.mod index 53ec85c78ee8..93dce22e6f4a 100644 --- a/processor/transformprocessor/go.mod +++ b/processor/transformprocessor/go.mod @@ -5,8 +5,8 @@ go 1.17 require ( github.com/alecthomas/participle/v2 v2.0.0-alpha8 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/multierr v1.8.0 go.uber.org/zap v1.21.0 ) @@ -15,7 +15,7 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/fsnotify/fsnotify v1.5.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/pretty v0.3.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect @@ -23,7 +23,6 @@ require ( github.com/pelletier/go-toml v1.9.4 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect @@ -33,3 +32,5 @@ require ( gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/processor/transformprocessor/go.sum b/processor/transformprocessor/go.sum index 0f838f14f5a6..70e7c144705a 100644 --- a/processor/transformprocessor/go.sum +++ b/processor/transformprocessor/go.sum @@ -19,7 +19,7 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= @@ -74,7 +74,6 @@ github.com/google/go-cmp 
v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -104,8 +103,8 @@ github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqx github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= @@ -152,8 +151,6 @@ github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTE github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -169,10 +166,10 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod 
h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= @@ -212,7 +209,7 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -235,7 +232,7 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= diff --git a/processor/transformprocessor/internal/common/condition_test.go b/processor/transformprocessor/internal/common/condition_test.go index 5b42e27de725..489699a209f3 100644 --- a/processor/transformprocessor/internal/common/condition_test.go +++ b/processor/transformprocessor/internal/common/condition_test.go @@ -19,16 +19,17 @@ import ( "testing" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" ) func Test_newConditionEvaluator(t *testing.T) { - span := pdata.NewSpan() + span := ptrace.NewSpan() span.SetName("bear") tests := []struct { name string cond *Condition - matching pdata.Span + matching ptrace.Span }{ { name: "literals match", @@ -106,8 +107,8 @@ func Test_newConditionEvaluator(t *testing.T) { assert.NoError(t, err) assert.True(t, evaluate(testTransformContext{ span: tt.matching, - il: pdata.NewInstrumentationScope(), - resource: pdata.NewResource(), + il: 
pcommon.NewInstrumentationScope(), + resource: pcommon.NewResource(), })) }) } @@ -129,20 +130,20 @@ func Test_newConditionEvaluator(t *testing.T) { // Small copy of traces data model for use in common tests type testTransformContext struct { - span pdata.Span - il pdata.InstrumentationScope - resource pdata.Resource + span ptrace.Span + il pcommon.InstrumentationScope + resource pcommon.Resource } func (ctx testTransformContext) GetItem() interface{} { return ctx.span } -func (ctx testTransformContext) GetInstrumentationScope() pdata.InstrumentationScope { +func (ctx testTransformContext) GetInstrumentationScope() pcommon.InstrumentationScope { return ctx.il } -func (ctx testTransformContext) GetResource() pdata.Resource { +func (ctx testTransformContext) GetResource() pcommon.Resource { return ctx.resource } @@ -164,10 +165,10 @@ func testParsePath(val *Path) (GetSetter, error) { if val != nil && len(val.Fields) > 0 && val.Fields[0].Name == "name" { return &testGetSetter{ getter: func(ctx TransformContext) interface{} { - return ctx.GetItem().(pdata.Span).Name() + return ctx.GetItem().(ptrace.Span).Name() }, setter: func(ctx TransformContext, val interface{}) { - ctx.GetItem().(pdata.Span).SetName(val.(string)) + ctx.GetItem().(ptrace.Span).SetName(val.(string)) }, }, nil } diff --git a/processor/transformprocessor/internal/common/expression.go b/processor/transformprocessor/internal/common/expression.go index d88a0db1a24a..245b81117411 100644 --- a/processor/transformprocessor/internal/common/expression.go +++ b/processor/transformprocessor/internal/common/expression.go @@ -17,13 +17,13 @@ package common // import "github.com/open-telemetry/opentelemetry-collector-cont import ( "fmt" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" ) type TransformContext interface { GetItem() interface{} - GetInstrumentationScope() pdata.InstrumentationScope - GetResource() pdata.Resource + GetInstrumentationScope() pcommon.InstrumentationScope + GetResource() pcommon.Resource } type ExprFunc func(ctx TransformContext) interface{} diff --git a/processor/transformprocessor/internal/common/expression_test.go b/processor/transformprocessor/internal/common/expression_test.go index c715b224b46b..beb0a0850e15 100644 --- a/processor/transformprocessor/internal/common/expression_test.go +++ b/processor/transformprocessor/internal/common/expression_test.go @@ -18,7 +18,8 @@ import ( "testing" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" ) func hello() ExprFunc { @@ -28,7 +29,7 @@ func hello() ExprFunc { } func Test_newGetter(t *testing.T) { - span := pdata.NewSpan() + span := ptrace.NewSpan() span.SetName("bear") tests := []struct { name string @@ -88,8 +89,8 @@ func Test_newGetter(t *testing.T) { assert.NoError(t, err) val := reader.Get(testTransformContext{ span: span, - il: pdata.NewInstrumentationScope(), - resource: pdata.NewResource(), + il: pcommon.NewInstrumentationScope(), + resource: pcommon.NewResource(), }) assert.Equal(t, tt.want, val) }) diff --git a/processor/transformprocessor/internal/common/functions.go b/processor/transformprocessor/internal/common/functions.go index d24d735619b3..911f4937ebad 100644 --- a/processor/transformprocessor/internal/common/functions.go +++ b/processor/transformprocessor/internal/common/functions.go @@ -18,7 +18,7 @@ import ( "fmt" "reflect" - 
"go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" ) var registry = map[string]interface{}{ @@ -54,11 +54,11 @@ func keepKeys(target GetSetter, keys []string) ExprFunc { return nil } - if attrs, ok := val.(pdata.Map); ok { + if attrs, ok := val.(pcommon.Map); ok { // TODO(anuraaga): Avoid copying when filtering keys https://github.com/open-telemetry/opentelemetry-collector/issues/4756 - filtered := pdata.NewMap() + filtered := pcommon.NewMap() filtered.EnsureCapacity(attrs.Len()) - attrs.Range(func(key string, val pdata.Value) bool { + attrs.Range(func(key string, val pcommon.Value) bool { if _, ok := keySet[key]; ok { filtered.Insert(key, val) } diff --git a/processor/transformprocessor/internal/traces/functions_test.go b/processor/transformprocessor/internal/traces/functions_test.go index 33acb171d095..956b6a6fdccf 100644 --- a/processor/transformprocessor/internal/traces/functions_test.go +++ b/processor/transformprocessor/internal/traces/functions_test.go @@ -18,15 +18,16 @@ import ( "testing" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/common" ) func Test_newFunctionCall(t *testing.T) { - input := pdata.NewSpan() + input := ptrace.NewSpan() input.SetName("bear") - attrs := pdata.NewMap() + attrs := pcommon.NewMap() attrs.InsertString("test", "1") attrs.InsertInt("test2", 3) attrs.InsertBool("test3", true) @@ -35,7 +36,7 @@ func Test_newFunctionCall(t *testing.T) { tests := []struct { name string inv common.Invocation - want func(pdata.Span) + want func(ptrace.Span) }{ { name: "set name", @@ -56,7 +57,7 @@ func Test_newFunctionCall(t *testing.T) { }, }, }, - want: func(span pdata.Span) { + want: func(span ptrace.Span) { input.CopyTo(span) span.SetName("cat") }, @@ -83,9 +84,9 @@ func Test_newFunctionCall(t *testing.T) { }, }, }, - want: func(span pdata.Span) { + want: func(span ptrace.Span) { input.CopyTo(span) - span.Status().SetCode(pdata.StatusCodeOk) + span.Status().SetCode(ptrace.StatusCodeOk) }, }, { @@ -107,10 +108,10 @@ func Test_newFunctionCall(t *testing.T) { }, }, }, - want: func(span pdata.Span) { + want: func(span ptrace.Span) { input.CopyTo(span) span.Attributes().Clear() - attrs := pdata.NewMap() + attrs := pcommon.NewMap() attrs.InsertString("test", "1") attrs.CopyTo(span.Attributes()) }, @@ -137,10 +138,10 @@ func Test_newFunctionCall(t *testing.T) { }, }, }, - want: func(span pdata.Span) { + want: func(span ptrace.Span) { input.CopyTo(span) span.Attributes().Clear() - attrs := pdata.NewMap() + attrs := pcommon.NewMap() attrs.InsertString("test", "1") attrs.InsertInt("test2", 3) attrs.CopyTo(span.Attributes()) @@ -162,7 +163,7 @@ func Test_newFunctionCall(t *testing.T) { }, }, }, - want: func(span pdata.Span) { + want: func(span ptrace.Span) { input.CopyTo(span) span.Attributes().Clear() }, @@ -170,18 +171,18 @@ func Test_newFunctionCall(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - span := pdata.NewSpan() + span := ptrace.NewSpan() input.CopyTo(span) evaluate, err := common.NewFunctionCall(tt.inv, DefaultFunctions(), ParsePath) assert.NoError(t, err) evaluate(spanTransformContext{ span: span, - il: pdata.NewInstrumentationScope(), - resource: pdata.NewResource(), + il: pcommon.NewInstrumentationScope(), + resource: pcommon.NewResource(), }) 
- expected := pdata.NewSpan() + expected := ptrace.NewSpan() tt.want(expected) assert.Equal(t, expected, span) }) diff --git a/processor/transformprocessor/internal/traces/processor.go b/processor/transformprocessor/internal/traces/processor.go index cdf68ace12fe..a934337ffa40 100644 --- a/processor/transformprocessor/internal/traces/processor.go +++ b/processor/transformprocessor/internal/traces/processor.go @@ -18,7 +18,7 @@ import ( "context" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/common" @@ -40,7 +40,7 @@ func NewProcessor(statements []string, functions map[string]interface{}, setting }, nil } -func (p *Processor) ProcessTraces(_ context.Context, td pdata.Traces) (pdata.Traces, error) { +func (p *Processor) ProcessTraces(_ context.Context, td ptrace.Traces) (ptrace.Traces, error) { ctx := spanTransformContext{} for i := 0; i < td.ResourceSpans().Len(); i++ { rspans := td.ResourceSpans().At(i) diff --git a/processor/transformprocessor/internal/traces/processor_test.go b/processor/transformprocessor/internal/traces/processor_test.go index da24cbd7e392..88024654a2d1 100644 --- a/processor/transformprocessor/internal/traces/processor_test.go +++ b/processor/transformprocessor/internal/traces/processor_test.go @@ -21,47 +21,48 @@ import ( "github.com/stretchr/testify/assert" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" ) var ( TestSpanStartTime = time.Date(2020, 2, 11, 20, 26, 12, 321, time.UTC) - TestSpanStartTimestamp = pdata.NewTimestampFromTime(TestSpanStartTime) + TestSpanStartTimestamp = pcommon.NewTimestampFromTime(TestSpanStartTime) TestSpanEndTime = time.Date(2020, 2, 11, 20, 26, 13, 789, time.UTC) - TestSpanEndTimestamp = pdata.NewTimestampFromTime(TestSpanEndTime) + TestSpanEndTimestamp = pcommon.NewTimestampFromTime(TestSpanEndTime) ) func TestProcess(t *testing.T) { tests := []struct { query string - want func(td pdata.Traces) + want func(td ptrace.Traces) }{ { query: `set(attributes["test"], "pass") where name == "operationA"`, - want: func(td pdata.Traces) { + want: func(td ptrace.Traces) { td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes().InsertString("test", "pass") }, }, { query: `set(attributes["test"], "pass") where resource.attributes["host.name"] == "localhost"`, - want: func(td pdata.Traces) { + want: func(td ptrace.Traces) { td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes().InsertString("test", "pass") td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(1).Attributes().InsertString("test", "pass") }, }, { query: `keep_keys(attributes, "http.method") where name == "operationA"`, - want: func(td pdata.Traces) { + want: func(td ptrace.Traces) { td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes().Clear() td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Attributes().InsertString("http.method", "get") }, }, { query: `set(status.code, 1) where attributes["http.path"] == "/health"`, - want: func(td pdata.Traces) { - td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Status().SetCode(pdata.StatusCodeOk) - td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(1).Status().SetCode(pdata.StatusCodeOk) + want: func(td ptrace.Traces) { + 
td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0).Status().SetCode(ptrace.StatusCodeOk) + td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(1).Status().SetCode(ptrace.StatusCodeOk) }, }, } @@ -173,8 +174,8 @@ func BenchmarkHundredSpans(b *testing.B) { } } -func constructTraces() pdata.Traces { - td := pdata.NewTraces() +func constructTraces() ptrace.Traces { + td := ptrace.NewTraces() rs0 := td.ResourceSpans().AppendEmpty() rs0.Resource().Attributes().InsertString("host.name", "localhost") rs0ils0 := rs0.ScopeSpans().AppendEmpty() @@ -183,8 +184,8 @@ func constructTraces() pdata.Traces { return td } -func constructTracesNum(num int) pdata.Traces { - td := pdata.NewTraces() +func constructTracesNum(num int) ptrace.Traces { + td := ptrace.NewTraces() rs0 := td.ResourceSpans().AppendEmpty() rs0ils0 := rs0.ScopeSpans().AppendEmpty() for i := 0; i < num; i++ { @@ -193,7 +194,7 @@ func constructTracesNum(num int) pdata.Traces { return td } -func fillSpanOne(span pdata.Span) { +func fillSpanOne(span ptrace.Span) { span.SetName("operationA") span.SetStartTimestamp(TestSpanStartTimestamp) span.SetEndTimestamp(TestSpanEndTimestamp) @@ -202,11 +203,11 @@ func fillSpanOne(span pdata.Span) { span.Attributes().InsertString("http.path", "/health") span.Attributes().InsertString("http.url", "http://localhost/health") status := span.Status() - status.SetCode(pdata.StatusCodeError) + status.SetCode(ptrace.StatusCodeError) status.SetMessage("status-cancelled") } -func fillSpanTwo(span pdata.Span) { +func fillSpanTwo(span ptrace.Span) { span.SetName("operationB") span.SetStartTimestamp(TestSpanStartTimestamp) span.SetEndTimestamp(TestSpanEndTimestamp) @@ -219,6 +220,6 @@ func fillSpanTwo(span pdata.Span) { link1.SetDroppedAttributesCount(4) span.SetDroppedLinksCount(3) status := span.Status() - status.SetCode(pdata.StatusCodeError) + status.SetCode(ptrace.StatusCodeError) status.SetMessage("status-cancelled") } diff --git a/processor/transformprocessor/internal/traces/traces.go b/processor/transformprocessor/internal/traces/traces.go index fcff3ef966fa..e4b56382032d 100644 --- a/processor/transformprocessor/internal/traces/traces.go +++ b/processor/transformprocessor/internal/traces/traces.go @@ -19,26 +19,27 @@ import ( "fmt" "time" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/common" ) type spanTransformContext struct { - span pdata.Span - il pdata.InstrumentationScope - resource pdata.Resource + span ptrace.Span + il pcommon.InstrumentationScope + resource pcommon.Resource } func (ctx spanTransformContext) GetItem() interface{} { return ctx.span } -func (ctx spanTransformContext) GetInstrumentationScope() pdata.InstrumentationScope { +func (ctx spanTransformContext) GetInstrumentationScope() pcommon.InstrumentationScope { return ctx.il } -func (ctx spanTransformContext) GetResource() pdata.Resource { +func (ctx spanTransformContext) GetResource() pcommon.Resource { return ctx.resource } @@ -139,7 +140,7 @@ func accessResource() pathGetSetter { return ctx.GetResource() }, setter: func(ctx common.TransformContext, val interface{}) { - if newRes, ok := val.(pdata.Resource); ok { + if newRes, ok := val.(pcommon.Resource); ok { ctx.GetResource().Attributes().Clear() newRes.CopyTo(ctx.GetResource()) } @@ -153,7 +154,7 @@ func accessResourceAttributes() pathGetSetter { return 
ctx.GetResource().Attributes() }, setter: func(ctx common.TransformContext, val interface{}) { - if attrs, ok := val.(pdata.Map); ok { + if attrs, ok := val.(pcommon.Map); ok { ctx.GetResource().Attributes().Clear() attrs.CopyTo(ctx.GetResource().Attributes()) } @@ -178,7 +179,7 @@ func accessInstrumentationScope() pathGetSetter { return ctx.GetInstrumentationScope() }, setter: func(ctx common.TransformContext, val interface{}) { - if newIl, ok := val.(pdata.InstrumentationScope); ok { + if newIl, ok := val.(pcommon.InstrumentationScope); ok { newIl.CopyTo(ctx.GetInstrumentationScope()) } }, @@ -214,14 +215,14 @@ func accessInstrumentationScopeVersion() pathGetSetter { func accessTraceID() pathGetSetter { return pathGetSetter{ getter: func(ctx common.TransformContext) interface{} { - return ctx.GetItem().(pdata.Span).TraceID() + return ctx.GetItem().(ptrace.Span).TraceID() }, setter: func(ctx common.TransformContext, val interface{}) { if str, ok := val.(string); ok { id, _ := hex.DecodeString(str) var idArr [16]byte copy(idArr[:16], id) - ctx.GetItem().(pdata.Span).SetTraceID(pdata.NewTraceID(idArr)) + ctx.GetItem().(ptrace.Span).SetTraceID(pcommon.NewTraceID(idArr)) } }, } @@ -230,14 +231,14 @@ func accessTraceID() pathGetSetter { func accessSpanID() pathGetSetter { return pathGetSetter{ getter: func(ctx common.TransformContext) interface{} { - return ctx.GetItem().(pdata.Span).SpanID() + return ctx.GetItem().(ptrace.Span).SpanID() }, setter: func(ctx common.TransformContext, val interface{}) { if str, ok := val.(string); ok { id, _ := hex.DecodeString(str) var idArr [8]byte copy(idArr[:8], id) - ctx.GetItem().(pdata.Span).SetSpanID(pdata.NewSpanID(idArr)) + ctx.GetItem().(ptrace.Span).SetSpanID(pcommon.NewSpanID(idArr)) } }, } @@ -246,11 +247,11 @@ func accessSpanID() pathGetSetter { func accessTraceState() pathGetSetter { return pathGetSetter{ getter: func(ctx common.TransformContext) interface{} { - return ctx.GetItem().(pdata.Span).TraceState() + return ctx.GetItem().(ptrace.Span).TraceState() }, setter: func(ctx common.TransformContext, val interface{}) { if str, ok := val.(string); ok { - ctx.GetItem().(pdata.Span).SetTraceState(pdata.TraceState(str)) + ctx.GetItem().(ptrace.Span).SetTraceState(ptrace.TraceState(str)) } }, } @@ -259,14 +260,14 @@ func accessTraceState() pathGetSetter { func accessParentSpanID() pathGetSetter { return pathGetSetter{ getter: func(ctx common.TransformContext) interface{} { - return ctx.GetItem().(pdata.Span).ParentSpanID() + return ctx.GetItem().(ptrace.Span).ParentSpanID() }, setter: func(ctx common.TransformContext, val interface{}) { if str, ok := val.(string); ok { id, _ := hex.DecodeString(str) var idArr [8]byte copy(idArr[:8], id) - ctx.GetItem().(pdata.Span).SetParentSpanID(pdata.NewSpanID(idArr)) + ctx.GetItem().(ptrace.Span).SetParentSpanID(pcommon.NewSpanID(idArr)) } }, } @@ -275,11 +276,11 @@ func accessParentSpanID() pathGetSetter { func accessName() pathGetSetter { return pathGetSetter{ getter: func(ctx common.TransformContext) interface{} { - return ctx.GetItem().(pdata.Span).Name() + return ctx.GetItem().(ptrace.Span).Name() }, setter: func(ctx common.TransformContext, val interface{}) { if str, ok := val.(string); ok { - ctx.GetItem().(pdata.Span).SetName(str) + ctx.GetItem().(ptrace.Span).SetName(str) } }, } @@ -288,11 +289,11 @@ func accessName() pathGetSetter { func accessKind() pathGetSetter { return pathGetSetter{ getter: func(ctx common.TransformContext) interface{} { - return ctx.GetItem().(pdata.Span).Kind() + return 
ctx.GetItem().(ptrace.Span).Kind() }, setter: func(ctx common.TransformContext, val interface{}) { if i, ok := val.(int64); ok { - ctx.GetItem().(pdata.Span).SetKind(pdata.SpanKind(i)) + ctx.GetItem().(ptrace.Span).SetKind(ptrace.SpanKind(i)) } }, } @@ -301,11 +302,11 @@ func accessKind() pathGetSetter { func accessStartTimeUnixNano() pathGetSetter { return pathGetSetter{ getter: func(ctx common.TransformContext) interface{} { - return ctx.GetItem().(pdata.Span).StartTimestamp().AsTime().UnixNano() + return ctx.GetItem().(ptrace.Span).StartTimestamp().AsTime().UnixNano() }, setter: func(ctx common.TransformContext, val interface{}) { if i, ok := val.(int64); ok { - ctx.GetItem().(pdata.Span).SetStartTimestamp(pdata.NewTimestampFromTime(time.Unix(0, i))) + ctx.GetItem().(ptrace.Span).SetStartTimestamp(pcommon.NewTimestampFromTime(time.Unix(0, i))) } }, } @@ -314,11 +315,11 @@ func accessStartTimeUnixNano() pathGetSetter { func accessEndTimeUnixNano() pathGetSetter { return pathGetSetter{ getter: func(ctx common.TransformContext) interface{} { - return ctx.GetItem().(pdata.Span).EndTimestamp().AsTime().UnixNano() + return ctx.GetItem().(ptrace.Span).EndTimestamp().AsTime().UnixNano() }, setter: func(ctx common.TransformContext, val interface{}) { if i, ok := val.(int64); ok { - ctx.GetItem().(pdata.Span).SetEndTimestamp(pdata.NewTimestampFromTime(time.Unix(0, i))) + ctx.GetItem().(ptrace.Span).SetEndTimestamp(pcommon.NewTimestampFromTime(time.Unix(0, i))) } }, } @@ -327,12 +328,12 @@ func accessEndTimeUnixNano() pathGetSetter { func accessAttributes() pathGetSetter { return pathGetSetter{ getter: func(ctx common.TransformContext) interface{} { - return ctx.GetItem().(pdata.Span).Attributes() + return ctx.GetItem().(ptrace.Span).Attributes() }, setter: func(ctx common.TransformContext, val interface{}) { - if attrs, ok := val.(pdata.Map); ok { - ctx.GetItem().(pdata.Span).Attributes().Clear() - attrs.CopyTo(ctx.GetItem().(pdata.Span).Attributes()) + if attrs, ok := val.(pcommon.Map); ok { + ctx.GetItem().(ptrace.Span).Attributes().Clear() + attrs.CopyTo(ctx.GetItem().(ptrace.Span).Attributes()) } }, } @@ -341,10 +342,10 @@ func accessAttributes() pathGetSetter { func accessAttributesKey(mapKey *string) pathGetSetter { return pathGetSetter{ getter: func(ctx common.TransformContext) interface{} { - return getAttr(ctx.GetItem().(pdata.Span).Attributes(), *mapKey) + return getAttr(ctx.GetItem().(ptrace.Span).Attributes(), *mapKey) }, setter: func(ctx common.TransformContext, val interface{}) { - setAttr(ctx.GetItem().(pdata.Span).Attributes(), *mapKey, val) + setAttr(ctx.GetItem().(ptrace.Span).Attributes(), *mapKey, val) }, } } @@ -352,11 +353,11 @@ func accessAttributesKey(mapKey *string) pathGetSetter { func accessDroppedAttributesCount() pathGetSetter { return pathGetSetter{ getter: func(ctx common.TransformContext) interface{} { - return ctx.GetItem().(pdata.Span).DroppedAttributesCount() + return ctx.GetItem().(ptrace.Span).DroppedAttributesCount() }, setter: func(ctx common.TransformContext, val interface{}) { if i, ok := val.(int64); ok { - ctx.GetItem().(pdata.Span).SetDroppedAttributesCount(uint32(i)) + ctx.GetItem().(ptrace.Span).SetDroppedAttributesCount(uint32(i)) } }, } @@ -365,14 +366,14 @@ func accessDroppedAttributesCount() pathGetSetter { func accessEvents() pathGetSetter { return pathGetSetter{ getter: func(ctx common.TransformContext) interface{} { - return ctx.GetItem().(pdata.Span).Events() + return ctx.GetItem().(ptrace.Span).Events() }, setter: func(ctx 
common.TransformContext, val interface{}) { - if slc, ok := val.(pdata.SpanEventSlice); ok { - ctx.GetItem().(pdata.Span).Events().RemoveIf(func(event pdata.SpanEvent) bool { + if slc, ok := val.(ptrace.SpanEventSlice); ok { + ctx.GetItem().(ptrace.Span).Events().RemoveIf(func(event ptrace.SpanEvent) bool { return true }) - slc.CopyTo(ctx.GetItem().(pdata.Span).Events()) + slc.CopyTo(ctx.GetItem().(ptrace.Span).Events()) } }, } @@ -381,11 +382,11 @@ func accessEvents() pathGetSetter { func accessDroppedEventsCount() pathGetSetter { return pathGetSetter{ getter: func(ctx common.TransformContext) interface{} { - return ctx.GetItem().(pdata.Span).DroppedEventsCount() + return ctx.GetItem().(ptrace.Span).DroppedEventsCount() }, setter: func(ctx common.TransformContext, val interface{}) { if i, ok := val.(int64); ok { - ctx.GetItem().(pdata.Span).SetDroppedEventsCount(uint32(i)) + ctx.GetItem().(ptrace.Span).SetDroppedEventsCount(uint32(i)) } }, } @@ -394,14 +395,14 @@ func accessDroppedEventsCount() pathGetSetter { func accessLinks() pathGetSetter { return pathGetSetter{ getter: func(ctx common.TransformContext) interface{} { - return ctx.GetItem().(pdata.Span).Links() + return ctx.GetItem().(ptrace.Span).Links() }, setter: func(ctx common.TransformContext, val interface{}) { - if slc, ok := val.(pdata.SpanLinkSlice); ok { - ctx.GetItem().(pdata.Span).Links().RemoveIf(func(event pdata.SpanLink) bool { + if slc, ok := val.(ptrace.SpanLinkSlice); ok { + ctx.GetItem().(ptrace.Span).Links().RemoveIf(func(event ptrace.SpanLink) bool { return true }) - slc.CopyTo(ctx.GetItem().(pdata.Span).Links()) + slc.CopyTo(ctx.GetItem().(ptrace.Span).Links()) } }, } @@ -410,11 +411,11 @@ func accessLinks() pathGetSetter { func accessDroppedLinksCount() pathGetSetter { return pathGetSetter{ getter: func(ctx common.TransformContext) interface{} { - return ctx.GetItem().(pdata.Span).DroppedLinksCount() + return ctx.GetItem().(ptrace.Span).DroppedLinksCount() }, setter: func(ctx common.TransformContext, val interface{}) { if i, ok := val.(int64); ok { - ctx.GetItem().(pdata.Span).SetDroppedLinksCount(uint32(i)) + ctx.GetItem().(ptrace.Span).SetDroppedLinksCount(uint32(i)) } }, } @@ -423,11 +424,11 @@ func accessDroppedLinksCount() pathGetSetter { func accessStatus() pathGetSetter { return pathGetSetter{ getter: func(ctx common.TransformContext) interface{} { - return ctx.GetItem().(pdata.Span).Status() + return ctx.GetItem().(ptrace.Span).Status() }, setter: func(ctx common.TransformContext, val interface{}) { - if status, ok := val.(pdata.SpanStatus); ok { - status.CopyTo(ctx.GetItem().(pdata.Span).Status()) + if status, ok := val.(ptrace.SpanStatus); ok { + status.CopyTo(ctx.GetItem().(ptrace.Span).Status()) } }, } @@ -436,11 +437,11 @@ func accessStatus() pathGetSetter { func accessStatusCode() pathGetSetter { return pathGetSetter{ getter: func(ctx common.TransformContext) interface{} { - return ctx.GetItem().(pdata.Span).Status().Code() + return ctx.GetItem().(ptrace.Span).Status().Code() }, setter: func(ctx common.TransformContext, val interface{}) { if i, ok := val.(int64); ok { - ctx.GetItem().(pdata.Span).Status().SetCode(pdata.StatusCode(i)) + ctx.GetItem().(ptrace.Span).Status().SetCode(ptrace.StatusCode(i)) } }, } @@ -449,41 +450,41 @@ func accessStatusCode() pathGetSetter { func accessStatusMessage() pathGetSetter { return pathGetSetter{ getter: func(ctx common.TransformContext) interface{} { - return ctx.GetItem().(pdata.Span).Status().Message() + return ctx.GetItem().(ptrace.Span).Status().Message() 
}, setter: func(ctx common.TransformContext, val interface{}) { if str, ok := val.(string); ok { - ctx.GetItem().(pdata.Span).Status().SetMessage(str) + ctx.GetItem().(ptrace.Span).Status().SetMessage(str) } }, } } -func getAttr(attrs pdata.Map, mapKey string) interface{} { +func getAttr(attrs pcommon.Map, mapKey string) interface{} { val, ok := attrs.Get(mapKey) if !ok { return nil } switch val.Type() { - case pdata.ValueTypeString: + case pcommon.ValueTypeString: return val.StringVal() - case pdata.ValueTypeBool: + case pcommon.ValueTypeBool: return val.BoolVal() - case pdata.ValueTypeInt: + case pcommon.ValueTypeInt: return val.IntVal() - case pdata.ValueTypeDouble: + case pcommon.ValueTypeDouble: return val.DoubleVal() - case pdata.ValueTypeMap: + case pcommon.ValueTypeMap: return val.MapVal() - case pdata.ValueTypeSlice: + case pcommon.ValueTypeSlice: return val.SliceVal() - case pdata.ValueTypeBytes: + case pcommon.ValueTypeBytes: return val.BytesVal() } return nil } -func setAttr(attrs pdata.Map, mapKey string, val interface{}) { +func setAttr(attrs pcommon.Map, mapKey string, val interface{}) { switch v := val.(type) { case string: attrs.UpsertString(mapKey, v) @@ -496,31 +497,31 @@ func setAttr(attrs pdata.Map, mapKey string, val interface{}) { case []byte: attrs.UpsertBytes(mapKey, v) case []string: - arr := pdata.NewValueSlice() + arr := pcommon.NewValueSlice() for _, str := range v { arr.SliceVal().AppendEmpty().SetStringVal(str) } attrs.Upsert(mapKey, arr) case []bool: - arr := pdata.NewValueSlice() + arr := pcommon.NewValueSlice() for _, b := range v { arr.SliceVal().AppendEmpty().SetBoolVal(b) } attrs.Upsert(mapKey, arr) case []int64: - arr := pdata.NewValueSlice() + arr := pcommon.NewValueSlice() for _, i := range v { arr.SliceVal().AppendEmpty().SetIntVal(i) } attrs.Upsert(mapKey, arr) case []float64: - arr := pdata.NewValueSlice() + arr := pcommon.NewValueSlice() for _, f := range v { arr.SliceVal().AppendEmpty().SetDoubleVal(f) } attrs.Upsert(mapKey, arr) case [][]byte: - arr := pdata.NewValueSlice() + arr := pcommon.NewValueSlice() for _, b := range v { arr.SliceVal().AppendEmpty().SetBytesVal(b) } diff --git a/processor/transformprocessor/internal/traces/traces_test.go b/processor/transformprocessor/internal/traces/traces_test.go index f887ecf5d27d..1acb9ec302f8 100644 --- a/processor/transformprocessor/internal/traces/traces_test.go +++ b/processor/transformprocessor/internal/traces/traces_test.go @@ -20,7 +20,8 @@ import ( "time" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor/internal/common" ) @@ -35,31 +36,31 @@ var ( func Test_newPathGetSetter(t *testing.T) { refSpan, _, _ := createTelemetry() - newAttrs := pdata.NewMap() + newAttrs := pcommon.NewMap() newAttrs.UpsertString("hello", "world") - newEvents := pdata.NewSpanEventSlice() + newEvents := ptrace.NewSpanEventSlice() newEvents.AppendEmpty().SetName("new event") - newLinks := pdata.NewSpanLinkSlice() - newLinks.AppendEmpty().SetSpanID(pdata.NewSpanID(spanID2)) + newLinks := ptrace.NewSpanLinkSlice() + newLinks.AppendEmpty().SetSpanID(pcommon.NewSpanID(spanID2)) - newStatus := pdata.NewSpanStatus() + newStatus := ptrace.NewSpanStatus() newStatus.SetMessage("new status") - newArrStr := pdata.NewValueSlice() + newArrStr := pcommon.NewValueSlice() 
newArrStr.SliceVal().AppendEmpty().SetStringVal("new") - newArrBool := pdata.NewValueSlice() + newArrBool := pcommon.NewValueSlice() newArrBool.SliceVal().AppendEmpty().SetBoolVal(false) - newArrInt := pdata.NewValueSlice() + newArrInt := pcommon.NewValueSlice() newArrInt.SliceVal().AppendEmpty().SetIntVal(20) - newArrFloat := pdata.NewValueSlice() + newArrFloat := pcommon.NewValueSlice() newArrFloat.SliceVal().AppendEmpty().SetDoubleVal(2.0) - newArrBytes := pdata.NewValueSlice() + newArrBytes := pcommon.NewValueSlice() newArrBytes.SliceVal().AppendEmpty().SetBytesVal([]byte{9, 6, 4}) tests := []struct { @@ -67,7 +68,7 @@ func Test_newPathGetSetter(t *testing.T) { path []common.Field orig interface{} new interface{} - modified func(span pdata.Span, il pdata.InstrumentationScope, resource pdata.Resource) + modified func(span ptrace.Span, il pcommon.InstrumentationScope, resource pcommon.Resource) }{ { name: "trace_id", @@ -76,10 +77,10 @@ func Test_newPathGetSetter(t *testing.T) { Name: "trace_id", }, }, - orig: pdata.NewTraceID(traceID), + orig: pcommon.NewTraceID(traceID), new: hex.EncodeToString(traceID2[:]), - modified: func(span pdata.Span, il pdata.InstrumentationScope, resource pdata.Resource) { - span.SetTraceID(pdata.NewTraceID(traceID2)) + modified: func(span ptrace.Span, il pcommon.InstrumentationScope, resource pcommon.Resource) { + span.SetTraceID(pcommon.NewTraceID(traceID2)) }, }, { @@ -89,10 +90,10 @@ func Test_newPathGetSetter(t *testing.T) { Name: "span_id", }, }, - orig: pdata.NewSpanID(spanID), + orig: pcommon.NewSpanID(spanID), new: hex.EncodeToString(spanID2[:]), - modified: func(span pdata.Span, il pdata.InstrumentationScope, resource pdata.Resource) { - span.SetSpanID(pdata.NewSpanID(spanID2)) + modified: func(span ptrace.Span, il pcommon.InstrumentationScope, resource pcommon.Resource) { + span.SetSpanID(pcommon.NewSpanID(spanID2)) }, }, { @@ -102,9 +103,9 @@ func Test_newPathGetSetter(t *testing.T) { Name: "trace_state", }, }, - orig: pdata.TraceState("state"), + orig: ptrace.TraceState("state"), new: "newstate", - modified: func(span pdata.Span, il pdata.InstrumentationScope, resource pdata.Resource) { + modified: func(span ptrace.Span, il pcommon.InstrumentationScope, resource pcommon.Resource) { span.SetTraceState("newstate") }, }, @@ -115,10 +116,10 @@ func Test_newPathGetSetter(t *testing.T) { Name: "parent_span_id", }, }, - orig: pdata.NewSpanID(spanID2), + orig: pcommon.NewSpanID(spanID2), new: hex.EncodeToString(spanID[:]), - modified: func(span pdata.Span, il pdata.InstrumentationScope, resource pdata.Resource) { - span.SetParentSpanID(pdata.NewSpanID(spanID)) + modified: func(span ptrace.Span, il pcommon.InstrumentationScope, resource pcommon.Resource) { + span.SetParentSpanID(pcommon.NewSpanID(spanID)) }, }, { @@ -130,7 +131,7 @@ func Test_newPathGetSetter(t *testing.T) { }, orig: "bear", new: "cat", - modified: func(span pdata.Span, il pdata.InstrumentationScope, resource pdata.Resource) { + modified: func(span ptrace.Span, il pcommon.InstrumentationScope, resource pcommon.Resource) { span.SetName("cat") }, }, @@ -141,10 +142,10 @@ func Test_newPathGetSetter(t *testing.T) { Name: "kind", }, }, - orig: pdata.SpanKindServer, + orig: ptrace.SpanKindServer, new: int64(3), - modified: func(span pdata.Span, il pdata.InstrumentationScope, resource pdata.Resource) { - span.SetKind(pdata.SpanKindClient) + modified: func(span ptrace.Span, il pcommon.InstrumentationScope, resource pcommon.Resource) { + span.SetKind(ptrace.SpanKindClient) }, }, { @@ -156,8 +157,8 
@@ func Test_newPathGetSetter(t *testing.T) { }, orig: int64(100_000_000), new: int64(200_000_000), - modified: func(span pdata.Span, il pdata.InstrumentationScope, resource pdata.Resource) { - span.SetStartTimestamp(pdata.NewTimestampFromTime(time.UnixMilli(200))) + modified: func(span ptrace.Span, il pcommon.InstrumentationScope, resource pcommon.Resource) { + span.SetStartTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(200))) }, }, { @@ -169,8 +170,8 @@ func Test_newPathGetSetter(t *testing.T) { }, orig: int64(500_000_000), new: int64(200_000_000), - modified: func(span pdata.Span, il pdata.InstrumentationScope, resource pdata.Resource) { - span.SetEndTimestamp(pdata.NewTimestampFromTime(time.UnixMilli(200))) + modified: func(span ptrace.Span, il pcommon.InstrumentationScope, resource pcommon.Resource) { + span.SetEndTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(200))) }, }, { @@ -182,7 +183,7 @@ func Test_newPathGetSetter(t *testing.T) { }, orig: refSpan.Attributes(), new: newAttrs, - modified: func(span pdata.Span, il pdata.InstrumentationScope, resource pdata.Resource) { + modified: func(span ptrace.Span, il pcommon.InstrumentationScope, resource pcommon.Resource) { span.Attributes().Clear() newAttrs.CopyTo(span.Attributes()) }, @@ -197,7 +198,7 @@ func Test_newPathGetSetter(t *testing.T) { }, orig: "val", new: "newVal", - modified: func(span pdata.Span, il pdata.InstrumentationScope, resource pdata.Resource) { + modified: func(span ptrace.Span, il pcommon.InstrumentationScope, resource pcommon.Resource) { span.Attributes().UpsertString("str", "newVal") }, }, @@ -211,7 +212,7 @@ func Test_newPathGetSetter(t *testing.T) { }, orig: true, new: false, - modified: func(span pdata.Span, il pdata.InstrumentationScope, resource pdata.Resource) { + modified: func(span ptrace.Span, il pcommon.InstrumentationScope, resource pcommon.Resource) { span.Attributes().UpsertBool("bool", false) }, }, @@ -225,7 +226,7 @@ func Test_newPathGetSetter(t *testing.T) { }, orig: int64(10), new: int64(20), - modified: func(span pdata.Span, il pdata.InstrumentationScope, resource pdata.Resource) { + modified: func(span ptrace.Span, il pcommon.InstrumentationScope, resource pcommon.Resource) { span.Attributes().UpsertInt("int", 20) }, }, @@ -239,7 +240,7 @@ func Test_newPathGetSetter(t *testing.T) { }, orig: float64(1.2), new: float64(2.4), - modified: func(span pdata.Span, il pdata.InstrumentationScope, resource pdata.Resource) { + modified: func(span ptrace.Span, il pcommon.InstrumentationScope, resource pcommon.Resource) { span.Attributes().UpsertDouble("double", 2.4) }, }, @@ -253,7 +254,7 @@ func Test_newPathGetSetter(t *testing.T) { }, orig: []byte{1, 3, 2}, new: []byte{2, 3, 4}, - modified: func(span pdata.Span, il pdata.InstrumentationScope, resource pdata.Resource) { + modified: func(span ptrace.Span, il pcommon.InstrumentationScope, resource pcommon.Resource) { span.Attributes().UpsertBytes("bytes", []byte{2, 3, 4}) }, }, @@ -265,12 +266,12 @@ func Test_newPathGetSetter(t *testing.T) { MapKey: strp("arr_str"), }, }, - orig: func() pdata.Slice { + orig: func() pcommon.Slice { val, _ := refSpan.Attributes().Get("arr_str") return val.SliceVal() }(), new: []string{"new"}, - modified: func(span pdata.Span, il pdata.InstrumentationScope, resource pdata.Resource) { + modified: func(span ptrace.Span, il pcommon.InstrumentationScope, resource pcommon.Resource) { span.Attributes().Upsert("arr_str", newArrStr) }, }, @@ -282,12 +283,12 @@ func Test_newPathGetSetter(t *testing.T) { MapKey: 
strp("arr_bool"), }, }, - orig: func() pdata.Slice { + orig: func() pcommon.Slice { val, _ := refSpan.Attributes().Get("arr_bool") return val.SliceVal() }(), new: []bool{false}, - modified: func(span pdata.Span, il pdata.InstrumentationScope, resource pdata.Resource) { + modified: func(span ptrace.Span, il pcommon.InstrumentationScope, resource pcommon.Resource) { span.Attributes().Upsert("arr_bool", newArrBool) }, }, @@ -299,12 +300,12 @@ func Test_newPathGetSetter(t *testing.T) { MapKey: strp("arr_int"), }, }, - orig: func() pdata.Slice { + orig: func() pcommon.Slice { val, _ := refSpan.Attributes().Get("arr_int") return val.SliceVal() }(), new: []int64{20}, - modified: func(span pdata.Span, il pdata.InstrumentationScope, resource pdata.Resource) { + modified: func(span ptrace.Span, il pcommon.InstrumentationScope, resource pcommon.Resource) { span.Attributes().Upsert("arr_int", newArrInt) }, }, @@ -316,12 +317,12 @@ func Test_newPathGetSetter(t *testing.T) { MapKey: strp("arr_float"), }, }, - orig: func() pdata.Slice { + orig: func() pcommon.Slice { val, _ := refSpan.Attributes().Get("arr_float") return val.SliceVal() }(), new: []float64{2.0}, - modified: func(span pdata.Span, il pdata.InstrumentationScope, resource pdata.Resource) { + modified: func(span ptrace.Span, il pcommon.InstrumentationScope, resource pcommon.Resource) { span.Attributes().Upsert("arr_float", newArrFloat) }, }, @@ -333,12 +334,12 @@ func Test_newPathGetSetter(t *testing.T) { MapKey: strp("arr_bytes"), }, }, - orig: func() pdata.Slice { + orig: func() pcommon.Slice { val, _ := refSpan.Attributes().Get("arr_bytes") return val.SliceVal() }(), new: [][]byte{{9, 6, 4}}, - modified: func(span pdata.Span, il pdata.InstrumentationScope, resource pdata.Resource) { + modified: func(span ptrace.Span, il pcommon.InstrumentationScope, resource pcommon.Resource) { span.Attributes().Upsert("arr_bytes", newArrBytes) }, }, @@ -351,7 +352,7 @@ func Test_newPathGetSetter(t *testing.T) { }, orig: uint32(10), new: int64(20), - modified: func(span pdata.Span, il pdata.InstrumentationScope, resource pdata.Resource) { + modified: func(span ptrace.Span, il pcommon.InstrumentationScope, resource pcommon.Resource) { span.SetDroppedAttributesCount(20) }, }, @@ -364,8 +365,8 @@ func Test_newPathGetSetter(t *testing.T) { }, orig: refSpan.Events(), new: newEvents, - modified: func(span pdata.Span, il pdata.InstrumentationScope, resource pdata.Resource) { - span.Events().RemoveIf(func(_ pdata.SpanEvent) bool { + modified: func(span ptrace.Span, il pcommon.InstrumentationScope, resource pcommon.Resource) { + span.Events().RemoveIf(func(_ ptrace.SpanEvent) bool { return true }) newEvents.CopyTo(span.Events()) @@ -380,7 +381,7 @@ func Test_newPathGetSetter(t *testing.T) { }, orig: uint32(20), new: int64(30), - modified: func(span pdata.Span, il pdata.InstrumentationScope, resource pdata.Resource) { + modified: func(span ptrace.Span, il pcommon.InstrumentationScope, resource pcommon.Resource) { span.SetDroppedEventsCount(30) }, }, @@ -393,8 +394,8 @@ func Test_newPathGetSetter(t *testing.T) { }, orig: refSpan.Links(), new: newLinks, - modified: func(span pdata.Span, il pdata.InstrumentationScope, resource pdata.Resource) { - span.Links().RemoveIf(func(_ pdata.SpanLink) bool { + modified: func(span ptrace.Span, il pcommon.InstrumentationScope, resource pcommon.Resource) { + span.Links().RemoveIf(func(_ ptrace.SpanLink) bool { return true }) newLinks.CopyTo(span.Links()) @@ -409,7 +410,7 @@ func Test_newPathGetSetter(t *testing.T) { }, orig: 
uint32(30), new: int64(40), - modified: func(span pdata.Span, il pdata.InstrumentationScope, resource pdata.Resource) { + modified: func(span ptrace.Span, il pcommon.InstrumentationScope, resource pcommon.Resource) { span.SetDroppedLinksCount(40) }, }, @@ -422,7 +423,7 @@ func Test_newPathGetSetter(t *testing.T) { }, orig: refSpan.Status(), new: newStatus, - modified: func(span pdata.Span, il pdata.InstrumentationScope, resource pdata.Resource) { + modified: func(span ptrace.Span, il pcommon.InstrumentationScope, resource pcommon.Resource) { newStatus.CopyTo(span.Status()) }, }, @@ -436,10 +437,10 @@ func Test_newPathGetSetter(t *testing.T) { Name: "code", }, }, - orig: pdata.StatusCodeOk, - new: int64(pdata.StatusCodeError), - modified: func(span pdata.Span, il pdata.InstrumentationScope, resource pdata.Resource) { - span.Status().SetCode(pdata.StatusCodeError) + orig: ptrace.StatusCodeOk, + new: int64(ptrace.StatusCodeError), + modified: func(span ptrace.Span, il pcommon.InstrumentationScope, resource pcommon.Resource) { + span.Status().SetCode(ptrace.StatusCodeError) }, }, { @@ -454,7 +455,7 @@ func Test_newPathGetSetter(t *testing.T) { }, orig: "good span", new: "bad span", - modified: func(span pdata.Span, il pdata.InstrumentationScope, resource pdata.Resource) { + modified: func(span ptrace.Span, il pcommon.InstrumentationScope, resource pcommon.Resource) { span.Status().SetMessage("bad span") }, }, @@ -470,7 +471,7 @@ func Test_newPathGetSetter(t *testing.T) { }, orig: refSpan.Attributes(), new: newAttrs, - modified: func(span pdata.Span, il pdata.InstrumentationScope, resource pdata.Resource) { + modified: func(span ptrace.Span, il pcommon.InstrumentationScope, resource pcommon.Resource) { resource.Attributes().Clear() newAttrs.CopyTo(resource.Attributes()) }, @@ -488,7 +489,7 @@ func Test_newPathGetSetter(t *testing.T) { }, orig: "val", new: "newVal", - modified: func(span pdata.Span, il pdata.InstrumentationScope, resource pdata.Resource) { + modified: func(span ptrace.Span, il pcommon.InstrumentationScope, resource pcommon.Resource) { resource.Attributes().UpsertString("str", "newVal") }, }, @@ -505,7 +506,7 @@ func Test_newPathGetSetter(t *testing.T) { }, orig: true, new: false, - modified: func(span pdata.Span, il pdata.InstrumentationScope, resource pdata.Resource) { + modified: func(span ptrace.Span, il pcommon.InstrumentationScope, resource pcommon.Resource) { resource.Attributes().UpsertBool("bool", false) }, }, @@ -522,7 +523,7 @@ func Test_newPathGetSetter(t *testing.T) { }, orig: int64(10), new: int64(20), - modified: func(span pdata.Span, il pdata.InstrumentationScope, resource pdata.Resource) { + modified: func(span ptrace.Span, il pcommon.InstrumentationScope, resource pcommon.Resource) { resource.Attributes().UpsertInt("int", 20) }, }, @@ -539,7 +540,7 @@ func Test_newPathGetSetter(t *testing.T) { }, orig: float64(1.2), new: float64(2.4), - modified: func(span pdata.Span, il pdata.InstrumentationScope, resource pdata.Resource) { + modified: func(span ptrace.Span, il pcommon.InstrumentationScope, resource pcommon.Resource) { resource.Attributes().UpsertDouble("double", 2.4) }, }, @@ -556,7 +557,7 @@ func Test_newPathGetSetter(t *testing.T) { }, orig: []byte{1, 3, 2}, new: []byte{2, 3, 4}, - modified: func(span pdata.Span, il pdata.InstrumentationScope, resource pdata.Resource) { + modified: func(span ptrace.Span, il pcommon.InstrumentationScope, resource pcommon.Resource) { resource.Attributes().UpsertBytes("bytes", []byte{2, 3, 4}) }, }, @@ -571,12 +572,12 @@ func 
Test_newPathGetSetter(t *testing.T) { MapKey: strp("arr_str"), }, }, - orig: func() pdata.Slice { + orig: func() pcommon.Slice { val, _ := refSpan.Attributes().Get("arr_str") return val.SliceVal() }(), new: []string{"new"}, - modified: func(span pdata.Span, il pdata.InstrumentationScope, resource pdata.Resource) { + modified: func(span ptrace.Span, il pcommon.InstrumentationScope, resource pcommon.Resource) { resource.Attributes().Upsert("arr_str", newArrStr) }, }, @@ -591,12 +592,12 @@ func Test_newPathGetSetter(t *testing.T) { MapKey: strp("arr_bool"), }, }, - orig: func() pdata.Slice { + orig: func() pcommon.Slice { val, _ := refSpan.Attributes().Get("arr_bool") return val.SliceVal() }(), new: []bool{false}, - modified: func(span pdata.Span, il pdata.InstrumentationScope, resource pdata.Resource) { + modified: func(span ptrace.Span, il pcommon.InstrumentationScope, resource pcommon.Resource) { resource.Attributes().Upsert("arr_bool", newArrBool) }, }, @@ -611,12 +612,12 @@ func Test_newPathGetSetter(t *testing.T) { MapKey: strp("arr_int"), }, }, - orig: func() pdata.Slice { + orig: func() pcommon.Slice { val, _ := refSpan.Attributes().Get("arr_int") return val.SliceVal() }(), new: []int64{20}, - modified: func(span pdata.Span, il pdata.InstrumentationScope, resource pdata.Resource) { + modified: func(span ptrace.Span, il pcommon.InstrumentationScope, resource pcommon.Resource) { resource.Attributes().Upsert("arr_int", newArrInt) }, }, @@ -631,12 +632,12 @@ func Test_newPathGetSetter(t *testing.T) { MapKey: strp("arr_float"), }, }, - orig: func() pdata.Slice { + orig: func() pcommon.Slice { val, _ := refSpan.Attributes().Get("arr_float") return val.SliceVal() }(), new: []float64{2.0}, - modified: func(span pdata.Span, il pdata.InstrumentationScope, resource pdata.Resource) { + modified: func(span ptrace.Span, il pcommon.InstrumentationScope, resource pcommon.Resource) { resource.Attributes().Upsert("arr_float", newArrFloat) }, }, @@ -651,12 +652,12 @@ func Test_newPathGetSetter(t *testing.T) { MapKey: strp("arr_bytes"), }, }, - orig: func() pdata.Slice { + orig: func() pcommon.Slice { val, _ := refSpan.Attributes().Get("arr_bytes") return val.SliceVal() }(), new: [][]byte{{9, 6, 4}}, - modified: func(span pdata.Span, il pdata.InstrumentationScope, resource pdata.Resource) { + modified: func(span ptrace.Span, il pcommon.InstrumentationScope, resource pcommon.Resource) { resource.Attributes().Upsert("arr_bytes", newArrBytes) }, }, @@ -691,43 +692,43 @@ func Test_newPathGetSetter(t *testing.T) { } } -func createTelemetry() (pdata.Span, pdata.InstrumentationScope, pdata.Resource) { - span := pdata.NewSpan() - span.SetTraceID(pdata.NewTraceID(traceID)) - span.SetSpanID(pdata.NewSpanID(spanID)) +func createTelemetry() (ptrace.Span, pcommon.InstrumentationScope, pcommon.Resource) { + span := ptrace.NewSpan() + span.SetTraceID(pcommon.NewTraceID(traceID)) + span.SetSpanID(pcommon.NewSpanID(spanID)) span.SetTraceState("state") - span.SetParentSpanID(pdata.NewSpanID(spanID2)) + span.SetParentSpanID(pcommon.NewSpanID(spanID2)) span.SetName("bear") - span.SetKind(pdata.SpanKindServer) - span.SetStartTimestamp(pdata.NewTimestampFromTime(time.UnixMilli(100))) - span.SetEndTimestamp(pdata.NewTimestampFromTime(time.UnixMilli(500))) + span.SetKind(ptrace.SpanKindServer) + span.SetStartTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(100))) + span.SetEndTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(500))) span.Attributes().UpsertString("str", "val") span.Attributes().UpsertBool("bool", true) 
span.Attributes().UpsertInt("int", 10) span.Attributes().UpsertDouble("double", 1.2) span.Attributes().UpsertBytes("bytes", []byte{1, 3, 2}) - arrStr := pdata.NewValueSlice() + arrStr := pcommon.NewValueSlice() arrStr.SliceVal().AppendEmpty().SetStringVal("one") arrStr.SliceVal().AppendEmpty().SetStringVal("two") span.Attributes().Upsert("arr_str", arrStr) - arrBool := pdata.NewValueSlice() + arrBool := pcommon.NewValueSlice() arrBool.SliceVal().AppendEmpty().SetBoolVal(true) arrBool.SliceVal().AppendEmpty().SetBoolVal(false) span.Attributes().Upsert("arr_bool", arrBool) - arrInt := pdata.NewValueSlice() + arrInt := pcommon.NewValueSlice() arrInt.SliceVal().AppendEmpty().SetIntVal(2) arrInt.SliceVal().AppendEmpty().SetIntVal(3) span.Attributes().Upsert("arr_int", arrInt) - arrFloat := pdata.NewValueSlice() + arrFloat := pcommon.NewValueSlice() arrFloat.SliceVal().AppendEmpty().SetDoubleVal(1.0) arrFloat.SliceVal().AppendEmpty().SetDoubleVal(2.0) span.Attributes().Upsert("arr_float", arrFloat) - arrBytes := pdata.NewValueSlice() + arrBytes := pcommon.NewValueSlice() arrBytes.SliceVal().AppendEmpty().SetBytesVal([]byte{1, 2, 3}) arrBytes.SliceVal().AppendEmpty().SetBytesVal([]byte{2, 3, 4}) span.Attributes().Upsert("arr_bytes", arrBytes) @@ -737,17 +738,17 @@ func createTelemetry() (pdata.Span, pdata.InstrumentationScope, pdata.Resource) span.Events().AppendEmpty().SetName("event") span.SetDroppedEventsCount(20) - span.Links().AppendEmpty().SetTraceID(pdata.NewTraceID(traceID)) + span.Links().AppendEmpty().SetTraceID(pcommon.NewTraceID(traceID)) span.SetDroppedLinksCount(30) - span.Status().SetCode(pdata.StatusCodeOk) + span.Status().SetCode(ptrace.StatusCodeOk) span.Status().SetMessage("good span") - il := pdata.NewInstrumentationScope() + il := pcommon.NewInstrumentationScope() il.SetName("library") il.SetVersion("version") - resource := pdata.NewResource() + resource := pcommon.NewResource() span.Attributes().CopyTo(resource.Attributes()) return span, il, resource diff --git a/receiver/apachereceiver/go.mod b/receiver/apachereceiver/go.mod index edecde439031..6137d32b9448 100644 --- a/receiver/apachereceiver/go.mod +++ b/receiver/apachereceiver/go.mod @@ -5,8 +5,7 @@ go 1.17 require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/scrapertest v0.48.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d go.uber.org/zap v1.21.0 ) @@ -14,7 +13,7 @@ require ( github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect github.com/Microsoft/go-winio v0.4.17 // indirect github.com/Microsoft/hcsshim v0.8.23 // indirect - github.com/cenkalti/backoff/v4 v4.1.2 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/containerd/cgroups v1.0.1 // indirect github.com/containerd/containerd v1.5.9 // indirect github.com/davecgh/go-spew v1.1.1 // indirect @@ -32,7 +31,7 @@ require ( github.com/golang/snappy v0.0.4 // indirect github.com/google/uuid v1.3.0 // indirect github.com/klauspost/compress v1.15.1 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/text v0.2.0 // indirect github.com/magiconair/properties v1.8.6 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect @@ -50,7 +49,6 @@ require ( 
github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rs/cors v1.8.2 // indirect github.com/sirupsen/logrus v1.8.1 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect @@ -58,8 +56,8 @@ require ( go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect - golang.org/x/net v0.0.0-20211108170745-6635138e15ea // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect + golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 // indirect golang.org/x/text v0.3.7 // indirect google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect google.golang.org/grpc v1.45.0 // indirect @@ -71,6 +69,9 @@ require ( require ( github.com/rogpeppe/go-internal v1.6.2 // indirect github.com/testcontainers/testcontainers-go v0.13.0 + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 ) replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/scrapertest => ../../internal/scrapertest + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/receiver/apachereceiver/go.sum b/receiver/apachereceiver/go.sum index 7f57599c53eb..9a314d03e6bf 100644 --- a/receiver/apachereceiver/go.sum +++ b/receiver/apachereceiver/go.sum @@ -102,8 +102,9 @@ github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8n github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -405,8 +406,8 @@ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5m github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= +github.com/gorilla/mux v1.7.2 h1:zoNxOV7WjqXptQOVngLmcSQgXmgk4NMz1HibBchjl/I= github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/websocket 
v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= @@ -473,8 +474,8 @@ github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYs github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -667,8 +668,6 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasO github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= @@ -737,10 +736,10 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp 
v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0/go.mod h1:PFmBsWbldL1kiWZk9+0LBZz2brhByaGsvp6pRICMlPE= go.opentelemetry.io/otel v1.6.0/go.mod h1:bfJD2DZVw0LBxghOTlgnlI0CV3hLDu9XF/QKOUXMTQQ= @@ -750,7 +749,7 @@ go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1 go.opentelemetry.io/otel/metric v0.28.0/go.mod h1:TrzsfQAmQaB1PDcdhBauLMk7nyyg9hm+GoQq/ekE9Iw= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.0/go.mod h1:qs7BrU5cZ8dXQHBGxHMOxwME/27YH2qEp4/+tZLLwJE= go.opentelemetry.io/otel/trace v1.6.1/go.mod h1:RkFRM1m0puWIq10oxImnGEduNBzxiN7TXluRBtE+5j0= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= @@ -850,8 +849,9 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20211108170745-6635138e15ea h1:FosBMXtOc8Tp9Hbo4ltl1WJSrTVewZU8MPnTPY2HdH8= golang.org/x/net v0.0.0-20211108170745-6635138e15ea/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -941,8 +941,8 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/receiver/apachereceiver/internal/metadata/generated_metrics_v2.go b/receiver/apachereceiver/internal/metadata/generated_metrics_v2.go index 
9742c9582030..d1ef628db7dc 100644 --- a/receiver/apachereceiver/internal/metadata/generated_metrics_v2.go +++ b/receiver/apachereceiver/internal/metadata/generated_metrics_v2.go @@ -5,7 +5,8 @@ package metadata import ( "time" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" ) // MetricSettings provides common settings for a particular metric. @@ -47,7 +48,7 @@ func DefaultMetricsSettings() MetricsSettings { } type metricApacheCurrentConnections struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -57,13 +58,13 @@ func (m *metricApacheCurrentConnections) init() { m.data.SetName("apache.current_connections") m.data.SetDescription("The number of active connections currently attached to the HTTP server.") m.data.SetUnit("connections") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricApacheCurrentConnections) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, serverNameAttributeValue string) { +func (m *metricApacheCurrentConnections) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, serverNameAttributeValue string) { if !m.settings.Enabled { return } @@ -71,7 +72,7 @@ func (m *metricApacheCurrentConnections) recordDataPoint(start pdata.Timestamp, dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.ServerName, pdata.NewValueString(serverNameAttributeValue)) + dp.Attributes().Insert(A.ServerName, pcommon.NewValueString(serverNameAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -82,7 +83,7 @@ func (m *metricApacheCurrentConnections) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApacheCurrentConnections) emit(metrics pdata.MetricSlice) { +func (m *metricApacheCurrentConnections) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -93,14 +94,14 @@ func (m *metricApacheCurrentConnections) emit(metrics pdata.MetricSlice) { func newMetricApacheCurrentConnections(settings MetricSettings) metricApacheCurrentConnections { m := metricApacheCurrentConnections{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricApacheRequests struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -110,13 +111,13 @@ func (m *metricApacheRequests) init() { m.data.SetName("apache.requests") m.data.SetDescription("The number of requests serviced by the HTTP server per second.") m.data.SetUnit("1") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricApacheRequests) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, serverNameAttributeValue string) { +func (m *metricApacheRequests) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, serverNameAttributeValue string) { if !m.settings.Enabled { return } @@ -124,7 +125,7 @@ func (m *metricApacheRequests) recordDataPoint(start pdata.Timestamp, ts pdata.T dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.ServerName, pdata.NewValueString(serverNameAttributeValue)) + dp.Attributes().Insert(A.ServerName, pcommon.NewValueString(serverNameAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -135,7 +136,7 @@ func (m *metricApacheRequests) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApacheRequests) emit(metrics pdata.MetricSlice) { +func (m *metricApacheRequests) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -146,14 +147,14 @@ func (m *metricApacheRequests) emit(metrics pdata.MetricSlice) { func newMetricApacheRequests(settings MetricSettings) metricApacheRequests { m := metricApacheRequests{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricApacheScoreboard struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -163,13 +164,13 @@ func (m *metricApacheScoreboard) init() { m.data.SetName("apache.scoreboard") m.data.SetDescription("The number of connections in each state.") m.data.SetUnit("scoreboard") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricApacheScoreboard) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, serverNameAttributeValue string, scoreboardStateAttributeValue string) { +func (m *metricApacheScoreboard) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, serverNameAttributeValue string, scoreboardStateAttributeValue string) { if !m.settings.Enabled { return } @@ -177,8 +178,8 @@ func (m *metricApacheScoreboard) recordDataPoint(start pdata.Timestamp, ts pdata dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.ServerName, pdata.NewValueString(serverNameAttributeValue)) - dp.Attributes().Insert(A.ScoreboardState, pdata.NewValueString(scoreboardStateAttributeValue)) + dp.Attributes().Insert(A.ServerName, pcommon.NewValueString(serverNameAttributeValue)) + dp.Attributes().Insert(A.ScoreboardState, pcommon.NewValueString(scoreboardStateAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -189,7 +190,7 @@ func (m *metricApacheScoreboard) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApacheScoreboard) emit(metrics pdata.MetricSlice) { +func (m *metricApacheScoreboard) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -200,14 +201,14 @@ func (m *metricApacheScoreboard) emit(metrics pdata.MetricSlice) { func newMetricApacheScoreboard(settings MetricSettings) metricApacheScoreboard { m := metricApacheScoreboard{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricApacheTraffic struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -217,13 +218,13 @@ func (m *metricApacheTraffic) init() { m.data.SetName("apache.traffic") m.data.SetDescription("Total HTTP server traffic.") m.data.SetUnit("By") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricApacheTraffic) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, serverNameAttributeValue string) { +func (m *metricApacheTraffic) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, serverNameAttributeValue string) { if !m.settings.Enabled { return } @@ -231,7 +232,7 @@ func (m *metricApacheTraffic) recordDataPoint(start pdata.Timestamp, ts pdata.Ti dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.ServerName, pdata.NewValueString(serverNameAttributeValue)) + dp.Attributes().Insert(A.ServerName, pcommon.NewValueString(serverNameAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -242,7 +243,7 @@ func (m *metricApacheTraffic) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApacheTraffic) emit(metrics pdata.MetricSlice) { +func (m *metricApacheTraffic) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -253,14 +254,14 @@ func (m *metricApacheTraffic) emit(metrics pdata.MetricSlice) { func newMetricApacheTraffic(settings MetricSettings) metricApacheTraffic { m := metricApacheTraffic{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricApacheUptime struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -270,13 +271,13 @@ func (m *metricApacheUptime) init() { m.data.SetName("apache.uptime") m.data.SetDescription("The amount of time that the server has been running in seconds.") m.data.SetUnit("s") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricApacheUptime) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, serverNameAttributeValue string) { +func (m *metricApacheUptime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, serverNameAttributeValue string) { if !m.settings.Enabled { return } @@ -284,7 +285,7 @@ func (m *metricApacheUptime) recordDataPoint(start pdata.Timestamp, ts pdata.Tim dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.ServerName, pdata.NewValueString(serverNameAttributeValue)) + dp.Attributes().Insert(A.ServerName, pcommon.NewValueString(serverNameAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -295,7 +296,7 @@ func (m *metricApacheUptime) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApacheUptime) emit(metrics pdata.MetricSlice) { +func (m *metricApacheUptime) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -306,14 +307,14 @@ func (m *metricApacheUptime) emit(metrics pdata.MetricSlice) { func newMetricApacheUptime(settings MetricSettings) metricApacheUptime { m := metricApacheUptime{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricApacheWorkers struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -323,13 +324,13 @@ func (m *metricApacheWorkers) init() { m.data.SetName("apache.workers") m.data.SetDescription("The number of workers currently attached to the HTTP server.") m.data.SetUnit("connections") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricApacheWorkers) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, serverNameAttributeValue string, workersStateAttributeValue string) { +func (m *metricApacheWorkers) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, serverNameAttributeValue string, workersStateAttributeValue string) { if !m.settings.Enabled { return } @@ -337,8 +338,8 @@ func (m *metricApacheWorkers) recordDataPoint(start pdata.Timestamp, ts pdata.Ti dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.ServerName, pdata.NewValueString(serverNameAttributeValue)) - dp.Attributes().Insert(A.WorkersState, pdata.NewValueString(workersStateAttributeValue)) + dp.Attributes().Insert(A.ServerName, pcommon.NewValueString(serverNameAttributeValue)) + dp.Attributes().Insert(A.WorkersState, pcommon.NewValueString(workersStateAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -349,7 +350,7 @@ func (m *metricApacheWorkers) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricApacheWorkers) emit(metrics pdata.MetricSlice) { +func (m *metricApacheWorkers) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -360,7 +361,7 @@ func (m *metricApacheWorkers) emit(metrics pdata.MetricSlice) { func newMetricApacheWorkers(settings MetricSettings) metricApacheWorkers { m := metricApacheWorkers{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m @@ -369,10 +370,10 @@ func newMetricApacheWorkers(settings MetricSettings) metricApacheWorkers { // MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations // required to produce metric representation defined in metadata and user settings. type MetricsBuilder struct { - startTime pdata.Timestamp // start time that will be applied to all recorded data points. - metricsCapacity int // maximum observed number of metrics per resource. - resourceCapacity int // maximum observed number of resource attributes. - metricsBuffer pdata.Metrics // accumulates metrics data before emitting. + startTime pcommon.Timestamp // start time that will be applied to all recorded data points. + metricsCapacity int // maximum observed number of metrics per resource. + resourceCapacity int // maximum observed number of resource attributes. + metricsBuffer pmetric.Metrics // accumulates metrics data before emitting. 
metricApacheCurrentConnections metricApacheCurrentConnections metricApacheRequests metricApacheRequests metricApacheScoreboard metricApacheScoreboard @@ -385,7 +386,7 @@ type MetricsBuilder struct { type metricBuilderOption func(*MetricsBuilder) // WithStartTime sets startTime on the metrics builder. -func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { +func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption { return func(mb *MetricsBuilder) { mb.startTime = startTime } @@ -393,8 +394,8 @@ func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ - startTime: pdata.NewTimestampFromTime(time.Now()), - metricsBuffer: pdata.NewMetrics(), + startTime: pcommon.NewTimestampFromTime(time.Now()), + metricsBuffer: pmetric.NewMetrics(), metricApacheCurrentConnections: newMetricApacheCurrentConnections(settings.ApacheCurrentConnections), metricApacheRequests: newMetricApacheRequests(settings.ApacheRequests), metricApacheScoreboard: newMetricApacheScoreboard(settings.ApacheScoreboard), @@ -409,7 +410,7 @@ func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) } // updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity. -func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { +func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) { if mb.metricsCapacity < rm.ScopeMetrics().At(0).Metrics().Len() { mb.metricsCapacity = rm.ScopeMetrics().At(0).Metrics().Len() } @@ -419,14 +420,14 @@ func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { } // ResourceOption applies changes to provided resource. -type ResourceOption func(pdata.Resource) +type ResourceOption func(pcommon.Resource) // EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for // recording another set of data points as part of another resource. This function can be helpful when one scraper // needs to emit metrics from several resources. Otherwise calling this function is not required, // just `Emit` function can be called instead. Resource attributes should be provided as ResourceOption arguments. func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { - rm := pdata.NewResourceMetrics() + rm := pmetric.NewResourceMetrics() rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity) for _, op := range ro { op(rm.Resource()) @@ -449,47 +450,47 @@ func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { // Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for // recording another set of metrics. This function will be responsible for applying all the transformations required to // produce metric representation defined in metadata and user settings, e.g. delta or cumulative. -func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pdata.Metrics { +func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pmetric.Metrics { mb.EmitForResource(ro...) - metrics := pdata.NewMetrics() + metrics := pmetric.NewMetrics() mb.metricsBuffer.MoveTo(metrics) return metrics } // RecordApacheCurrentConnectionsDataPoint adds a data point to apache.current_connections metric. 
-func (mb *MetricsBuilder) RecordApacheCurrentConnectionsDataPoint(ts pdata.Timestamp, val int64, serverNameAttributeValue string) { +func (mb *MetricsBuilder) RecordApacheCurrentConnectionsDataPoint(ts pcommon.Timestamp, val int64, serverNameAttributeValue string) { mb.metricApacheCurrentConnections.recordDataPoint(mb.startTime, ts, val, serverNameAttributeValue) } // RecordApacheRequestsDataPoint adds a data point to apache.requests metric. -func (mb *MetricsBuilder) RecordApacheRequestsDataPoint(ts pdata.Timestamp, val int64, serverNameAttributeValue string) { +func (mb *MetricsBuilder) RecordApacheRequestsDataPoint(ts pcommon.Timestamp, val int64, serverNameAttributeValue string) { mb.metricApacheRequests.recordDataPoint(mb.startTime, ts, val, serverNameAttributeValue) } // RecordApacheScoreboardDataPoint adds a data point to apache.scoreboard metric. -func (mb *MetricsBuilder) RecordApacheScoreboardDataPoint(ts pdata.Timestamp, val int64, serverNameAttributeValue string, scoreboardStateAttributeValue string) { +func (mb *MetricsBuilder) RecordApacheScoreboardDataPoint(ts pcommon.Timestamp, val int64, serverNameAttributeValue string, scoreboardStateAttributeValue string) { mb.metricApacheScoreboard.recordDataPoint(mb.startTime, ts, val, serverNameAttributeValue, scoreboardStateAttributeValue) } // RecordApacheTrafficDataPoint adds a data point to apache.traffic metric. -func (mb *MetricsBuilder) RecordApacheTrafficDataPoint(ts pdata.Timestamp, val int64, serverNameAttributeValue string) { +func (mb *MetricsBuilder) RecordApacheTrafficDataPoint(ts pcommon.Timestamp, val int64, serverNameAttributeValue string) { mb.metricApacheTraffic.recordDataPoint(mb.startTime, ts, val, serverNameAttributeValue) } // RecordApacheUptimeDataPoint adds a data point to apache.uptime metric. -func (mb *MetricsBuilder) RecordApacheUptimeDataPoint(ts pdata.Timestamp, val int64, serverNameAttributeValue string) { +func (mb *MetricsBuilder) RecordApacheUptimeDataPoint(ts pcommon.Timestamp, val int64, serverNameAttributeValue string) { mb.metricApacheUptime.recordDataPoint(mb.startTime, ts, val, serverNameAttributeValue) } // RecordApacheWorkersDataPoint adds a data point to apache.workers metric. -func (mb *MetricsBuilder) RecordApacheWorkersDataPoint(ts pdata.Timestamp, val int64, serverNameAttributeValue string, workersStateAttributeValue string) { +func (mb *MetricsBuilder) RecordApacheWorkersDataPoint(ts pcommon.Timestamp, val int64, serverNameAttributeValue string, workersStateAttributeValue string) { mb.metricApacheWorkers.recordDataPoint(mb.startTime, ts, val, serverNameAttributeValue, workersStateAttributeValue) } // Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted, // and metrics builder should update its startTime and reset it's internal state accordingly. 
func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { - mb.startTime = pdata.NewTimestampFromTime(time.Now()) + mb.startTime = pcommon.NewTimestampFromTime(time.Now()) for _, op := range options { op(mb) } diff --git a/receiver/apachereceiver/scraper.go b/receiver/apachereceiver/scraper.go index b65af119a5ae..7e6dbb6e9c68 100644 --- a/receiver/apachereceiver/scraper.go +++ b/receiver/apachereceiver/scraper.go @@ -24,7 +24,8 @@ import ( "time" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/apachereceiver/internal/metadata" @@ -57,18 +58,18 @@ func (r *apacheScraper) start(_ context.Context, host component.Host) error { return nil } -func (r *apacheScraper) scrape(context.Context) (pdata.Metrics, error) { +func (r *apacheScraper) scrape(context.Context) (pmetric.Metrics, error) { if r.httpClient == nil { - return pdata.Metrics{}, errors.New("failed to connect to Apache HTTPd") + return pmetric.Metrics{}, errors.New("failed to connect to Apache HTTPd") } stats, err := r.GetStats() if err != nil { r.settings.Logger.Error("failed to fetch Apache Httpd stats", zap.Error(err)) - return pdata.Metrics{}, err + return pmetric.Metrics{}, err } - now := pdata.NewTimestampFromTime(time.Now()) + now := pcommon.NewTimestampFromTime(time.Now()) for metricKey, metricValue := range parseStats(stats) { switch metricKey { case "ServerUptimeSeconds": diff --git a/receiver/awscontainerinsightreceiver/go.mod b/receiver/awscontainerinsightreceiver/go.mod index 90b383081fb6..ea14d2f636c4 100644 --- a/receiver/awscontainerinsightreceiver/go.mod +++ b/receiver/awscontainerinsightreceiver/go.mod @@ -13,8 +13,8 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/kubelet v0.48.0 github.com/shirou/gopsutil/v3 v3.22.3 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/zap v1.21.0 k8s.io/api v0.23.5 k8s.io/apimachinery v0.23.5 @@ -58,7 +58,7 @@ require ( github.com/json-iterator/go v1.1.12 // indirect github.com/karrick/godirwalk v1.16.1 // indirect github.com/klauspost/compress v1.15.1 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect github.com/mindprince/gonvml v0.0.0-20190828220739-9ebdce4bb989 // indirect @@ -87,7 +87,6 @@ require ( github.com/rs/cors v1.8.2 // indirect github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921 // indirect github.com/sirupsen/logrus v1.8.1 // indirect - github.com/spf13/cast v1.4.1 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/stretchr/objx v0.2.0 // indirect github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect @@ -139,3 +138,5 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/kubel // see 
https://github.com/distribution/distribution/issues/3590 exclude github.com/docker/distribution v2.8.0+incompatible + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/receiver/awscontainerinsightreceiver/go.sum b/receiver/awscontainerinsightreceiver/go.sum index 3b2fb0b74147..286b82f26d34 100644 --- a/receiver/awscontainerinsightreceiver/go.sum +++ b/receiver/awscontainerinsightreceiver/go.sum @@ -126,7 +126,7 @@ github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8n github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -553,8 +553,8 @@ github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYs github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -784,8 +784,6 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasO github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= @@ -867,10 +865,10 @@ go.opencensus.io v0.22.4/go.mod 
h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0/go.mod h1:PFmBsWbldL1kiWZk9+0LBZz2brhByaGsvp6pRICMlPE= diff --git a/receiver/awscontainerinsightreceiver/internal/cadvisor/cadvisor_linux.go b/receiver/awscontainerinsightreceiver/internal/cadvisor/cadvisor_linux.go index baf1410b14a0..70d1dd9727b1 100644 --- a/receiver/awscontainerinsightreceiver/internal/cadvisor/cadvisor_linux.go +++ b/receiver/awscontainerinsightreceiver/internal/cadvisor/cadvisor_linux.go @@ -33,7 +33,7 @@ import ( cInfo "github.com/google/cadvisor/info/v1" "github.com/google/cadvisor/manager" "github.com/google/cadvisor/utils/sysfs" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" ci "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/containerinsight" @@ -315,9 +315,9 @@ func (c *Cadvisor) decorateMetrics(cadvisormetrics []*extractors.CAdvisorMetric) } // GetMetrics generates metrics from cadvisor -func (c *Cadvisor) GetMetrics() []pdata.Metrics { +func (c *Cadvisor) GetMetrics() []pmetric.Metrics { c.logger.Debug("collect data from cadvisor...") - var result []pdata.Metrics + var result []pmetric.Metrics var containerinfos []*cInfo.ContainerInfo var err error diff --git a/receiver/awscontainerinsightreceiver/internal/cadvisor/cadvisor_nolinux.go b/receiver/awscontainerinsightreceiver/internal/cadvisor/cadvisor_nolinux.go index 44429b286a5e..c74397e8a87b 100644 --- a/receiver/awscontainerinsightreceiver/internal/cadvisor/cadvisor_nolinux.go +++ b/receiver/awscontainerinsightreceiver/internal/cadvisor/cadvisor_nolinux.go @@ -18,7 +18,7 @@ package cadvisor // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver/internal/cadvisor" import ( - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver/internal/cadvisor/extractors" @@ -62,6 +62,6 @@ func New(containerOrchestrator string, hostInfo HostInfo, logger 
*zap.Logger, op } // GetMetrics is a dummy function that always returns empty metrics for windows -func (c *Cadvisor) GetMetrics() []pdata.Metrics { - return []pdata.Metrics{} +func (c *Cadvisor) GetMetrics() []pmetric.Metrics { + return []pmetric.Metrics{} } diff --git a/receiver/awscontainerinsightreceiver/internal/k8sapiserver/k8sapiserver.go b/receiver/awscontainerinsightreceiver/internal/k8sapiserver/k8sapiserver.go index 246b96cdc592..9ffa7cf86fcc 100644 --- a/receiver/awscontainerinsightreceiver/internal/k8sapiserver/k8sapiserver.go +++ b/receiver/awscontainerinsightreceiver/internal/k8sapiserver/k8sapiserver.go @@ -23,7 +23,7 @@ import ( "sync" "time" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -119,8 +119,8 @@ func New(clusterNameProvider clusterNameProvider, logger *zap.Logger, options .. } // GetMetrics returns an array of metrics -func (k *K8sAPIServer) GetMetrics() []pdata.Metrics { - var result []pdata.Metrics +func (k *K8sAPIServer) GetMetrics() []pmetric.Metrics { + var result []pmetric.Metrics // don't generate any metrics if the current collector is not the leader k.mu.Lock() diff --git a/receiver/awscontainerinsightreceiver/internal/k8sapiserver/k8sapiserver_test.go b/receiver/awscontainerinsightreceiver/internal/k8sapiserver/k8sapiserver_test.go index 631696352997..32ceda4e7e7d 100644 --- a/receiver/awscontainerinsightreceiver/internal/k8sapiserver/k8sapiserver_test.go +++ b/receiver/awscontainerinsightreceiver/internal/k8sapiserver/k8sapiserver_test.go @@ -22,7 +22,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" @@ -114,7 +114,7 @@ func (m *mockEventBroadcaster) NewRecorder(scheme *runtime.Scheme, source v1.Eve return record.NewFakeRecorder(100) } -func getStringAttrVal(m pdata.Metrics, key string) string { +func getStringAttrVal(m pmetric.Metrics, key string) string { rm := m.ResourceMetrics().At(0) attributes := rm.Resource().Attributes() if attributeValue, ok := attributes.Get(key); ok { @@ -123,7 +123,7 @@ func getStringAttrVal(m pdata.Metrics, key string) string { return "" } -func assertMetricValueEqual(t *testing.T, m pdata.Metrics, metricName string, expected int64) { +func assertMetricValueEqual(t *testing.T, m pmetric.Metrics, metricName string, expected int64) { rm := m.ResourceMetrics().At(0) ilms := rm.ScopeMetrics() @@ -132,11 +132,11 @@ func assertMetricValueEqual(t *testing.T, m pdata.Metrics, metricName string, ex for i := 0; i < metricSlice.Len(); i++ { metric := metricSlice.At(i) if metric.Name() == metricName { - if metric.DataType() == pdata.MetricDataTypeGauge { + if metric.DataType() == pmetric.MetricDataTypeGauge { switch metric.Gauge().DataPoints().At(0).ValueType() { - case pdata.MetricValueTypeDouble: + case pmetric.MetricValueTypeDouble: assert.Equal(t, expected, metric.Gauge().DataPoints().At(0).DoubleVal()) - case pdata.MetricValueTypeInt: + case pmetric.MetricValueTypeInt: assert.Equal(t, expected, metric.Gauge().DataPoints().At(0).IntVal()) } diff --git a/receiver/awscontainerinsightreceiver/receiver.go b/receiver/awscontainerinsightreceiver/receiver.go index 9f72b202ad30..b2ceec9945bb 100644 --- a/receiver/awscontainerinsightreceiver/receiver.go +++ 
b/receiver/awscontainerinsightreceiver/receiver.go @@ -22,7 +22,7 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/component/componenterror" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" ci "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/containerinsight" @@ -36,7 +36,7 @@ import ( var _ component.MetricsReceiver = (*awsContainerInsightReceiver)(nil) type metricsProvider interface { - GetMetrics() []pdata.Metrics + GetMetrics() []pmetric.Metrics } // awsContainerInsightReceiver implements the component.MetricsReceiver @@ -138,7 +138,7 @@ func (acir *awsContainerInsightReceiver) Shutdown(context.Context) error { // collectData collects container stats from Amazon ECS Task Metadata Endpoint func (acir *awsContainerInsightReceiver) collectData(ctx context.Context) error { - var mds []pdata.Metrics + var mds []pmetric.Metrics if acir.cadvisor == nil && acir.k8sapiserver == nil { err := errors.New("both cadvisor and k8sapiserver failed to start") acir.settings.Logger.Error("Failed to collect stats", zap.Error(err)) diff --git a/receiver/awscontainerinsightreceiver/receiver_test.go b/receiver/awscontainerinsightreceiver/receiver_test.go index 5546d2c75b93..91c7ec87372e 100644 --- a/receiver/awscontainerinsightreceiver/receiver_test.go +++ b/receiver/awscontainerinsightreceiver/receiver_test.go @@ -21,7 +21,7 @@ import ( "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/consumer/consumertest" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" ci "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/containerinsight" ) @@ -30,18 +30,18 @@ import ( type MockCadvisor struct { } -func (c *MockCadvisor) GetMetrics() []pdata.Metrics { - md := pdata.NewMetrics() - return []pdata.Metrics{md} +func (c *MockCadvisor) GetMetrics() []pmetric.Metrics { + md := pmetric.NewMetrics() + return []pmetric.Metrics{md} } // Mock k8sapiserver type MockK8sAPIServer struct { } -func (m *MockK8sAPIServer) GetMetrics() []pdata.Metrics { - md := pdata.NewMetrics() - return []pdata.Metrics{md} +func (m *MockK8sAPIServer) GetMetrics() []pmetric.Metrics { + md := pmetric.NewMetrics() + return []pmetric.Metrics{md} } func TestReceiver(t *testing.T) { diff --git a/receiver/awsecscontainermetricsreceiver/go.mod b/receiver/awsecscontainermetricsreceiver/go.mod index e4373ce83f7d..a29afcf9d1f7 100644 --- a/receiver/awsecscontainermetricsreceiver/go.mod +++ b/receiver/awsecscontainermetricsreceiver/go.mod @@ -6,8 +6,9 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.48.0 github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.48.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/zap v1.21.0 ) @@ -21,19 +22,20 @@ require ( github.com/golang/protobuf v1.5.2 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/klauspost/compress v1.15.1 // indirect - github.com/knadh/koanf v1.4.0 // 
indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rs/cors v1.8.2 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect + golang.org/x/text v0.3.7 // indirect google.golang.org/grpc v1.45.0 // indirect google.golang.org/protobuf v1.28.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect @@ -43,3 +45,5 @@ require ( replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil => ../../internal/aws/ecsutil replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/common => ../../internal/common + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/receiver/awsecscontainermetricsreceiver/go.sum b/receiver/awsecscontainermetricsreceiver/go.sum index 7ea8a3635236..28ef2172b901 100644 --- a/receiver/awsecscontainermetricsreceiver/go.sum +++ b/receiver/awsecscontainermetricsreceiver/go.sum @@ -17,7 +17,7 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= @@ -83,7 +83,6 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -115,8 +114,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 
h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -159,8 +158,6 @@ github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -174,10 +171,12 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0/go.mod h1:PFmBsWbldL1kiWZk9+0LBZz2brhByaGsvp6pRICMlPE= @@ -225,7 +224,8 @@ golang.org/x/net 
v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -249,12 +249,13 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/receiver/awsecscontainermetricsreceiver/internal/awsecscontainermetrics/accumulator.go b/receiver/awsecscontainermetricsreceiver/internal/awsecscontainermetrics/accumulator.go index 6ef91eadd65b..4f753ae32dc6 100644 --- a/receiver/awsecscontainermetricsreceiver/internal/awsecscontainermetrics/accumulator.go +++ b/receiver/awsecscontainermetricsreceiver/internal/awsecscontainermetrics/accumulator.go @@ -17,7 +17,8 @@ package awsecscontainermetrics // import "github.com/open-telemetry/opentelemetr import ( "time" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil" @@ -25,20 +26,20 @@ import ( // metricDataAccumulator defines the accumulator type metricDataAccumulator struct { - mds []pdata.Metrics + mds []pmetric.Metrics } // getMetricsData generates OT Metrics data from task metadata and docker stats func (acc 
*metricDataAccumulator) getMetricsData(containerStatsMap map[string]*ContainerStats, metadata ecsutil.TaskMetadata, logger *zap.Logger) { taskMetrics := ECSMetrics{} - timestamp := pdata.NewTimestampFromTime(time.Now()) + timestamp := pcommon.NewTimestampFromTime(time.Now()) taskResource := taskResource(metadata) for _, containerMetadata := range metadata.Containers { containerResource := containerResource(containerMetadata, logger) - taskResource.Attributes().Range(func(k string, av pdata.Value) bool { + taskResource.Attributes().Range(func(k string, av pcommon.Value) bool { containerResource.Attributes().Upsert(k, av) return true }) @@ -67,7 +68,7 @@ func (acc *metricDataAccumulator) getMetricsData(containerStatsMap map[string]*C acc.accumulate(convertToOTLPMetrics(taskPrefix, taskMetrics, taskResource, timestamp)) } -func (acc *metricDataAccumulator) accumulate(md pdata.Metrics) { +func (acc *metricDataAccumulator) accumulate(md pmetric.Metrics) { acc.mds = append(acc.mds, md) } diff --git a/receiver/awsecscontainermetricsreceiver/internal/awsecscontainermetrics/metrics.go b/receiver/awsecscontainermetricsreceiver/internal/awsecscontainermetrics/metrics.go index 9adbc7a30e70..948f68ac767c 100644 --- a/receiver/awsecscontainermetricsreceiver/internal/awsecscontainermetrics/metrics.go +++ b/receiver/awsecscontainermetricsreceiver/internal/awsecscontainermetrics/metrics.go @@ -15,14 +15,14 @@ package awsecscontainermetrics // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsecscontainermetricsreceiver/internal/awsecscontainermetrics" import ( - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil" ) // MetricsData generates OTLP metrics from endpoint raw data -func MetricsData(containerStatsMap map[string]*ContainerStats, metadata ecsutil.TaskMetadata, logger *zap.Logger) []pdata.Metrics { +func MetricsData(containerStatsMap map[string]*ContainerStats, metadata ecsutil.TaskMetadata, logger *zap.Logger) []pmetric.Metrics { acc := &metricDataAccumulator{} acc.getMetricsData(containerStatsMap, metadata, logger) diff --git a/receiver/awsecscontainermetricsreceiver/internal/awsecscontainermetrics/resource.go b/receiver/awsecscontainermetricsreceiver/internal/awsecscontainermetrics/resource.go index b6b4deb62b0f..0abcf9a21874 100644 --- a/receiver/awsecscontainermetricsreceiver/internal/awsecscontainermetrics/resource.go +++ b/receiver/awsecscontainermetricsreceiver/internal/awsecscontainermetrics/resource.go @@ -17,16 +17,16 @@ package awsecscontainermetrics // import "github.com/open-telemetry/opentelemetr import ( "strings" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/docker" ) -func containerResource(cm ecsutil.ContainerMetadata, logger *zap.Logger) pdata.Resource { - resource := pdata.NewResource() +func containerResource(cm ecsutil.ContainerMetadata, logger *zap.Logger) pcommon.Resource { + resource := pcommon.NewResource() image, err := docker.ParseImageName(cm.Image) if err != nil { @@ -51,8 +51,8 @@ func containerResource(cm ecsutil.ContainerMetadata, logger *zap.Logger) pdata.R return resource } -func 
taskResource(tm ecsutil.TaskMetadata) pdata.Resource { - resource := pdata.NewResource() +func taskResource(tm ecsutil.TaskMetadata) pcommon.Resource { + resource := pcommon.NewResource() region, accountID, taskID := getResourceFromARN(tm.TaskARN) resource.Attributes().UpsertString(attributeECSCluster, getNameFromCluster(tm.Cluster)) resource.Attributes().UpsertString(conventions.AttributeAWSECSTaskARN, tm.TaskARN) diff --git a/receiver/awsecscontainermetricsreceiver/internal/awsecscontainermetrics/resource_test.go b/receiver/awsecscontainermetricsreceiver/internal/awsecscontainermetrics/resource_test.go index 2d91a9542505..d40b03dcf960 100644 --- a/receiver/awsecscontainermetricsreceiver/internal/awsecscontainermetrics/resource_test.go +++ b/receiver/awsecscontainermetricsreceiver/internal/awsecscontainermetrics/resource_test.go @@ -18,8 +18,8 @@ import ( "testing" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil" @@ -169,7 +169,7 @@ func TestTaskResourceWithClusterARN(t *testing.T) { verifyAttributeMap(t, expected, attrMap) } -func verifyAttributeMap(t *testing.T, expected map[string]string, found pdata.Map) { +func verifyAttributeMap(t *testing.T, expected map[string]string, found pcommon.Map) { for key, val := range expected { attributeVal, found := found.Get(key) require.EqualValues(t, true, found) diff --git a/receiver/awsecscontainermetricsreceiver/internal/awsecscontainermetrics/translator.go b/receiver/awsecscontainermetricsreceiver/internal/awsecscontainermetrics/translator.go index a63f18745bdc..b2363816d20d 100644 --- a/receiver/awsecscontainermetricsreceiver/internal/awsecscontainermetrics/translator.go +++ b/receiver/awsecscontainermetricsreceiver/internal/awsecscontainermetrics/translator.go @@ -15,12 +15,13 @@ package awsecscontainermetrics // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsecscontainermetricsreceiver/internal/awsecscontainermetrics" import ( - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" ) -func convertToOTLPMetrics(prefix string, m ECSMetrics, r pdata.Resource, timestamp pdata.Timestamp) pdata.Metrics { - md := pdata.NewMetrics() +func convertToOTLPMetrics(prefix string, m ECSMetrics, r pcommon.Resource, timestamp pcommon.Timestamp) pmetric.Metrics { + md := pmetric.NewMetrics() rm := md.ResourceMetrics().AppendEmpty() rm.SetSchemaUrl(conventions.SchemaURL) r.CopyTo(rm.Resource()) @@ -61,8 +62,8 @@ func convertToOTLPMetrics(prefix string, m ECSMetrics, r pdata.Resource, timesta return md } -func convertStoppedContainerDataToOTMetrics(prefix string, containerResource pdata.Resource, timestamp pdata.Timestamp, duration float64) pdata.Metrics { - md := pdata.NewMetrics() +func convertStoppedContainerDataToOTMetrics(prefix string, containerResource pcommon.Resource, timestamp pcommon.Timestamp, duration float64) pmetric.Metrics { + md := pmetric.NewMetrics() rm := md.ResourceMetrics().AppendEmpty() containerResource.CopyTo(rm.Resource()) ilms := rm.ScopeMetrics() @@ -72,41 +73,41 @@ func convertStoppedContainerDataToOTMetrics(prefix string, containerResource pda return md } -func appendIntGauge(metricName 
string, unit string, value int64, ts pdata.Timestamp, ilm pdata.ScopeMetrics) { +func appendIntGauge(metricName string, unit string, value int64, ts pcommon.Timestamp, ilm pmetric.ScopeMetrics) { metric := appendMetric(ilm, metricName, unit) - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) intGauge := metric.Gauge() appendIntDataPoint(intGauge.DataPoints(), value, ts) } -func appendIntSum(metricName string, unit string, value int64, ts pdata.Timestamp, ilm pdata.ScopeMetrics) { +func appendIntSum(metricName string, unit string, value int64, ts pcommon.Timestamp, ilm pmetric.ScopeMetrics) { metric := appendMetric(ilm, metricName, unit) - metric.SetDataType(pdata.MetricDataTypeSum) + metric.SetDataType(pmetric.MetricDataTypeSum) intSum := metric.Sum() - intSum.SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + intSum.SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) appendIntDataPoint(intSum.DataPoints(), value, ts) } -func appendDoubleGauge(metricName string, unit string, value float64, ts pdata.Timestamp, ilm pdata.ScopeMetrics) { +func appendDoubleGauge(metricName string, unit string, value float64, ts pcommon.Timestamp, ilm pmetric.ScopeMetrics) { metric := appendMetric(ilm, metricName, unit) - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) doubleGauge := metric.Gauge() dataPoint := doubleGauge.DataPoints().AppendEmpty() dataPoint.SetDoubleVal(value) dataPoint.SetTimestamp(ts) } -func appendIntDataPoint(dataPoints pdata.NumberDataPointSlice, value int64, ts pdata.Timestamp) { +func appendIntDataPoint(dataPoints pmetric.NumberDataPointSlice, value int64, ts pcommon.Timestamp) { dataPoint := dataPoints.AppendEmpty() dataPoint.SetIntVal(value) dataPoint.SetTimestamp(ts) } -func appendMetric(ilm pdata.ScopeMetrics, name, unit string) pdata.Metric { +func appendMetric(ilm pmetric.ScopeMetrics, name, unit string) pmetric.Metric { metric := ilm.Metrics().AppendEmpty() metric.SetName(name) metric.SetUnit(unit) diff --git a/receiver/awsecscontainermetricsreceiver/internal/awsecscontainermetrics/translator_test.go b/receiver/awsecscontainermetricsreceiver/internal/awsecscontainermetrics/translator_test.go index 1d426f16bc40..a6e9417993e2 100644 --- a/receiver/awsecscontainermetricsreceiver/internal/awsecscontainermetrics/translator_test.go +++ b/receiver/awsecscontainermetricsreceiver/internal/awsecscontainermetrics/translator_test.go @@ -19,12 +19,13 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" ) func TestConvertToOTMetrics(t *testing.T) { - timestamp := pdata.NewTimestampFromTime(time.Now()) + timestamp := pcommon.NewTimestampFromTime(time.Now()) m := ECSMetrics{} m.MemoryUsage = 100 @@ -33,7 +34,7 @@ func TestConvertToOTMetrics(t *testing.T) { m.MemoryReserved = 100 m.CPUTotalUsage = 100 - resource := pdata.NewResource() + resource := pcommon.NewResource() md := convertToOTLPMetrics("container.", m, resource, timestamp) require.EqualValues(t, 26, md.ResourceMetrics().At(0).ScopeMetrics().Len()) assert.EqualValues(t, conventions.SchemaURL, md.ResourceMetrics().At(0).SchemaUrl()) @@ -41,34 +42,34 @@ func TestConvertToOTMetrics(t *testing.T) { func TestIntGauge(t *testing.T) { intValue := 
int64(100) - timestamp := pdata.NewTimestampFromTime(time.Now()) + timestamp := pcommon.NewTimestampFromTime(time.Now()) - ilm := pdata.NewScopeMetrics() + ilm := pmetric.NewScopeMetrics() appendIntGauge("cpu_utilized", "Count", intValue, timestamp, ilm) require.NotNil(t, ilm) } func TestDoubleGauge(t *testing.T) { - timestamp := pdata.NewTimestampFromTime(time.Now()) + timestamp := pcommon.NewTimestampFromTime(time.Now()) floatValue := 100.01 - ilm := pdata.NewScopeMetrics() + ilm := pmetric.NewScopeMetrics() appendDoubleGauge("cpu_utilized", "Count", floatValue, timestamp, ilm) require.NotNil(t, ilm) } func TestIntSum(t *testing.T) { - timestamp := pdata.NewTimestampFromTime(time.Now()) + timestamp := pcommon.NewTimestampFromTime(time.Now()) intValue := int64(100) - ilm := pdata.NewScopeMetrics() + ilm := pmetric.NewScopeMetrics() appendIntSum("cpu_utilized", "Count", intValue, timestamp, ilm) require.NotNil(t, ilm) } func TestConvertStoppedContainerDataToOTMetrics(t *testing.T) { - timestamp := pdata.NewTimestampFromTime(time.Now()) - resource := pdata.NewResource() + timestamp := pcommon.NewTimestampFromTime(time.Now()) + resource := pcommon.NewResource() duration := 1200000000.32132 md := convertStoppedContainerDataToOTMetrics("container.", resource, timestamp, duration) require.EqualValues(t, 1, md.ResourceMetrics().At(0).ScopeMetrics().Len()) diff --git a/receiver/awsfirehosereceiver/go.mod b/receiver/awsfirehosereceiver/go.mod index ee7b09eb9404..d3387c59cd25 100644 --- a/receiver/awsfirehosereceiver/go.mod +++ b/receiver/awsfirehosereceiver/go.mod @@ -4,8 +4,9 @@ go 1.17 require ( github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/zap v1.21.0 ) @@ -19,7 +20,7 @@ require ( github.com/golang/protobuf v1.5.2 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/klauspost/compress v1.15.1 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/pretty v0.3.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect @@ -28,16 +29,18 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rs/cors v1.8.2 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect - golang.org/x/net v0.0.0-20211216030914-fe4d6282115f // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect + golang.org/x/text v0.3.7 // indirect google.golang.org/grpc v1.45.0 // indirect google.golang.org/protobuf v1.28.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/receiver/awsfirehosereceiver/go.sum 
b/receiver/awsfirehosereceiver/go.sum index 295e2be3f936..71ea0eca3fb5 100644 --- a/receiver/awsfirehosereceiver/go.sum +++ b/receiver/awsfirehosereceiver/go.sum @@ -17,7 +17,7 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= @@ -85,7 +85,6 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -117,8 +116,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= @@ -166,8 +165,6 @@ github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= 
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -181,10 +178,12 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0/go.mod h1:PFmBsWbldL1kiWZk9+0LBZz2brhByaGsvp6pRICMlPE= @@ -232,8 +231,8 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20211216030914-fe4d6282115f h1:hEYJvxw1lSnWIl8X9ofsYMklzaDs90JI2az5YMd4fPM= -golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -258,12 +257,13 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/receiver/awsfirehosereceiver/internal/unmarshaler/cwmetricstream/metricsbuilder.go b/receiver/awsfirehosereceiver/internal/unmarshaler/cwmetricstream/metricsbuilder.go index 6110454f5fe0..51a5ea905191 100644 --- a/receiver/awsfirehosereceiver/internal/unmarshaler/cwmetricstream/metricsbuilder.go +++ b/receiver/awsfirehosereceiver/internal/unmarshaler/cwmetricstream/metricsbuilder.go @@ -18,8 +18,9 @@ import ( "strings" "time" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" ) const ( @@ -70,9 +71,9 @@ func (rmb *resourceMetricsBuilder) AddMetric(metric cWMetric) { mb.AddDataPoint(metric) } -// Build updates the passed in pdata.ResourceMetrics with the metrics in +// Build updates the passed in pmetric.ResourceMetrics with the metrics in // the builder. -func (rmb *resourceMetricsBuilder) Build(rm pdata.ResourceMetrics) { +func (rmb *resourceMetricsBuilder) Build(rm pmetric.ResourceMetrics) { ilm := rm.ScopeMetrics().AppendEmpty() rmb.setAttributes(rm.Resource()) for _, mb := range rmb.metricBuilders { @@ -80,8 +81,8 @@ func (rmb *resourceMetricsBuilder) Build(rm pdata.ResourceMetrics) { } } -// setAttributes creates a pdata.Resource from the fields in the resourceMetricsBuilder. -func (rmb *resourceMetricsBuilder) setAttributes(resource pdata.Resource) { +// setAttributes creates a pcommon.Resource from the fields in the resourceMetricsBuilder. +func (rmb *resourceMetricsBuilder) setAttributes(resource pcommon.Resource) { attributes := resource.Attributes() attributes.InsertString(conventions.AttributeCloudProvider, conventions.AttributeCloudProviderAWS) attributes.InsertString(conventions.AttributeCloudAccountID, rmb.accountID) @@ -124,7 +125,7 @@ type metricBuilder struct { unit string // dataPoints is the slice of summary data points // for the metric. - dataPoints pdata.SummaryDataPointSlice + dataPoints pmetric.SummaryDataPointSlice // seen is the set of added data point keys. 
seen map[dataPointKey]bool } @@ -134,7 +135,7 @@ func newMetricBuilder(name, unit string) *metricBuilder { return &metricBuilder{ name: name, unit: unit, - dataPoints: pdata.NewSummaryDataPointSlice(), + dataPoints: pmetric.NewSummaryDataPointSlice(), seen: make(map[dataPointKey]bool), } } @@ -152,18 +153,18 @@ func (mb *metricBuilder) AddDataPoint(metric cWMetric) { } } -// Build builds the pdata.Metric with the data points that were added +// Build builds the pmetric.Metric with the data points that were added // with AddDataPoint. -func (mb *metricBuilder) Build(metric pdata.Metric) { +func (mb *metricBuilder) Build(metric pmetric.Metric) { metric.SetName(mb.name) metric.SetUnit(mb.unit) - metric.SetDataType(pdata.MetricDataTypeSummary) + metric.SetDataType(pmetric.MetricDataTypeSummary) mb.dataPoints.MoveAndAppendTo(metric.Summary().DataPoints()) } // toDataPoint converts a cWMetric into a pdata datapoint and attaches the // dimensions as attributes. -func (mb *metricBuilder) toDataPoint(dp pdata.SummaryDataPoint, metric cWMetric) { +func (mb *metricBuilder) toDataPoint(dp pmetric.SummaryDataPoint, metric cWMetric) { dp.SetCount(uint64(metric.Value.Count)) dp.SetSum(metric.Value.Sum) qv := dp.QuantileValues() @@ -173,7 +174,7 @@ func (mb *metricBuilder) toDataPoint(dp pdata.SummaryDataPoint, metric cWMetric) max := qv.AppendEmpty() max.SetQuantile(1) max.SetValue(metric.Value.Max) - dp.SetTimestamp(pdata.NewTimestampFromTime(time.UnixMilli(metric.Timestamp))) + dp.SetTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(metric.Timestamp))) for k, v := range metric.Dimensions { dp.Attributes().InsertString(ToSemConvAttributeKey(k), v) } diff --git a/receiver/awsfirehosereceiver/internal/unmarshaler/cwmetricstream/metricsbuilder_test.go b/receiver/awsfirehosereceiver/internal/unmarshaler/cwmetricstream/metricsbuilder_test.go index 1cfc655d633b..a98a38f6d0a1 100644 --- a/receiver/awsfirehosereceiver/internal/unmarshaler/cwmetricstream/metricsbuilder_test.go +++ b/receiver/awsfirehosereceiver/internal/unmarshaler/cwmetricstream/metricsbuilder_test.go @@ -20,8 +20,8 @@ import ( "time" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pmetric" ) const ( @@ -65,11 +65,11 @@ func TestMetricBuilder(t *testing.T) { } mb := newMetricBuilder(metric.MetricName, metric.Unit) mb.AddDataPoint(metric) - got := pdata.NewMetric() + got := pmetric.NewMetric() mb.Build(got) require.Equal(t, metric.MetricName, got.Name()) require.Equal(t, metric.Unit, got.Unit()) - require.Equal(t, pdata.MetricDataTypeSummary, got.DataType()) + require.Equal(t, pmetric.MetricDataTypeSummary, got.DataType()) gotDps := got.Summary().DataPoints() require.Equal(t, 1, gotDps.Len()) gotDp := gotDps.At(0) @@ -106,7 +106,7 @@ func TestMetricBuilder(t *testing.T) { for _, metric := range metrics { mb.AddDataPoint(metric) } - got := pdata.NewMetric() + got := pmetric.NewMetric() mb.Build(got) gotDps := got.Summary().DataPoints() require.Equal(t, 1, gotDps.Len()) @@ -160,7 +160,7 @@ func TestResourceMetricsBuilder(t *testing.T) { } rmb := newResourceMetricsBuilder(attrs) rmb.AddMetric(metric) - got := pdata.NewResourceMetrics() + got := pmetric.NewResourceMetrics() rmb.Build(got) gotAttrs := got.Resource().Attributes() for wantKey, wantValue := range testCase.wantAttributes { @@ -203,7 +203,7 @@ func TestResourceMetricsBuilder(t *testing.T) { for _, metric := range metrics { 
rmb.AddMetric(metric) } - got := pdata.NewResourceMetrics() + got := pmetric.NewResourceMetrics() rmb.Build(got) require.Equal(t, 1, got.ScopeMetrics().Len()) gotMetrics := got.ScopeMetrics().At(0).Metrics() diff --git a/receiver/awsfirehosereceiver/internal/unmarshaler/cwmetricstream/unmarshaler.go b/receiver/awsfirehosereceiver/internal/unmarshaler/cwmetricstream/unmarshaler.go index 56624ef8f05e..e2773360b2af 100644 --- a/receiver/awsfirehosereceiver/internal/unmarshaler/cwmetricstream/unmarshaler.go +++ b/receiver/awsfirehosereceiver/internal/unmarshaler/cwmetricstream/unmarshaler.go @@ -19,7 +19,7 @@ import ( "encoding/json" "errors" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsfirehosereceiver/internal/unmarshaler" @@ -50,9 +50,9 @@ func NewUnmarshaler(logger *zap.Logger) *Unmarshaler { } // Unmarshal deserializes the records into cWMetrics and uses the -// resourceMetricsBuilder to group them into a single pdata.Metrics. +// resourceMetricsBuilder to group them into a single pmetric.Metrics. // Skips invalid cWMetrics received in the record and -func (u Unmarshaler) Unmarshal(records [][]byte) (pdata.Metrics, error) { +func (u Unmarshaler) Unmarshal(records [][]byte) (pmetric.Metrics, error) { builders := make(map[resourceAttributes]*resourceMetricsBuilder) for recordIndex, record := range records { // Multiple metrics in each record separated by newline character @@ -94,10 +94,10 @@ func (u Unmarshaler) Unmarshal(records [][]byte) (pdata.Metrics, error) { } if len(builders) == 0 { - return pdata.NewMetrics(), errInvalidRecords + return pmetric.NewMetrics(), errInvalidRecords } - md := pdata.NewMetrics() + md := pmetric.NewMetrics() for _, builder := range builders { builder.Build(md.ResourceMetrics().AppendEmpty()) } diff --git a/receiver/awsfirehosereceiver/internal/unmarshaler/unmarshaler.go b/receiver/awsfirehosereceiver/internal/unmarshaler/unmarshaler.go index a041eb32e54e..4db2ce4bff8c 100644 --- a/receiver/awsfirehosereceiver/internal/unmarshaler/unmarshaler.go +++ b/receiver/awsfirehosereceiver/internal/unmarshaler/unmarshaler.go @@ -15,13 +15,13 @@ package unmarshaler // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsfirehosereceiver/internal/unmarshaler" import ( - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" ) // MetricsUnmarshaler deserializes the message body type MetricsUnmarshaler interface { // Unmarshal deserializes the records into metrics. - Unmarshal(records [][]byte) (pdata.Metrics, error) + Unmarshal(records [][]byte) (pmetric.Metrics, error) // Type of the serialized messages. 
Type() string diff --git a/receiver/awsfirehosereceiver/internal/unmarshaler/unmarshalertest/nop_unmarshaler.go b/receiver/awsfirehosereceiver/internal/unmarshaler/unmarshalertest/nop_unmarshaler.go index 23272331fc14..b97ef38847cc 100644 --- a/receiver/awsfirehosereceiver/internal/unmarshaler/unmarshalertest/nop_unmarshaler.go +++ b/receiver/awsfirehosereceiver/internal/unmarshaler/unmarshalertest/nop_unmarshaler.go @@ -15,7 +15,7 @@ package unmarshalertest // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsfirehosereceiver/internal/unmarshaler/unmarshalertest" import ( - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsfirehosereceiver/internal/unmarshaler" ) @@ -25,21 +25,21 @@ const typeStr = "nop" // NopMetricsUnmarshaler is a MetricsUnmarshaler that doesn't do anything // with the inputs and just returns the metrics and error passed in. type NopMetricsUnmarshaler struct { - metrics pdata.Metrics + metrics pmetric.Metrics err error } var _ unmarshaler.MetricsUnmarshaler = (*NopMetricsUnmarshaler)(nil) // NewNopMetrics provides a nop metrics unmarshaler with the default -// pdata.Metrics and no error. +// pmetric.Metrics and no error. func NewNopMetrics() *NopMetricsUnmarshaler { return &NopMetricsUnmarshaler{} } // NewWithMetrics provides a nop metrics unmarshaler with the passed // in metrics as the result of the Unmarshal and no error. -func NewWithMetrics(metrics pdata.Metrics) *NopMetricsUnmarshaler { +func NewWithMetrics(metrics pmetric.Metrics) *NopMetricsUnmarshaler { return &NopMetricsUnmarshaler{metrics: metrics} } @@ -50,7 +50,7 @@ func NewErrMetrics(err error) *NopMetricsUnmarshaler { } // Unmarshal deserializes the records into metrics. -func (u *NopMetricsUnmarshaler) Unmarshal([][]byte) (pdata.Metrics, error) { +func (u *NopMetricsUnmarshaler) Unmarshal([][]byte) (pmetric.Metrics, error) { return u.metrics, u.err } diff --git a/receiver/awsfirehosereceiver/internal/unmarshaler/unmarshalertest/nop_unmarshaler_test.go b/receiver/awsfirehosereceiver/internal/unmarshaler/unmarshalertest/nop_unmarshaler_test.go index 3368eb04c541..6757a68dfecb 100644 --- a/receiver/awsfirehosereceiver/internal/unmarshaler/unmarshalertest/nop_unmarshaler_test.go +++ b/receiver/awsfirehosereceiver/internal/unmarshaler/unmarshalertest/nop_unmarshaler_test.go @@ -19,7 +19,7 @@ import ( "testing" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" ) func TestNewNopMetrics(t *testing.T) { @@ -31,7 +31,7 @@ func TestNewNopMetrics(t *testing.T) { } func TestNewWithMetrics(t *testing.T) { - metrics := pdata.NewMetrics() + metrics := pmetric.NewMetrics() metrics.ResourceMetrics().AppendEmpty() unmarshaler := NewWithMetrics(metrics) got, err := unmarshaler.Unmarshal(nil) diff --git a/receiver/awsfirehosereceiver/metrics_receiver.go b/receiver/awsfirehosereceiver/metrics_receiver.go index dfced9fc0010..626300fb64b0 100644 --- a/receiver/awsfirehosereceiver/metrics_receiver.go +++ b/receiver/awsfirehosereceiver/metrics_receiver.go @@ -69,8 +69,8 @@ func newMetricsReceiver( } // Consume uses the configured unmarshaler to deserialize the records into a -// single pdata.Metrics. If there are common attributes available, then it will -// attach those to each of the pdata.Resources. It will send the final result +// single pmetric.Metrics. 
If there are common attributes available, then it will +// attach those to each of the pcommon.Resources. It will send the final result // to the next consumer. func (mc *metricsConsumer) Consume(ctx context.Context, records [][]byte, commonAttributes map[string]string) (int, error) { md, err := mc.unmarshaler.Unmarshal(records) diff --git a/receiver/awsfirehosereceiver/metrics_receiver_test.go b/receiver/awsfirehosereceiver/metrics_receiver_test.go index 90e092bad03d..c8c938feb49d 100644 --- a/receiver/awsfirehosereceiver/metrics_receiver_test.go +++ b/receiver/awsfirehosereceiver/metrics_receiver_test.go @@ -25,19 +25,19 @@ import ( "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumertest" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsfirehosereceiver/internal/unmarshaler/unmarshalertest" ) type recordConsumer struct { - result pdata.Metrics + result pmetric.Metrics } var _ consumer.Metrics = (*recordConsumer)(nil) -func (rc *recordConsumer) ConsumeMetrics(_ context.Context, metrics pdata.Metrics) error { +func (rc *recordConsumer) ConsumeMetrics(_ context.Context, metrics pmetric.Metrics) error { rc.result = metrics return nil } @@ -116,7 +116,7 @@ func TestMetricsConsumer(t *testing.T) { } t.Run("WithCommonAttributes", func(t *testing.T) { - base := pdata.NewMetrics() + base := pmetric.NewMetrics() base.ResourceMetrics().AppendEmpty() rc := recordConsumer{} mc := &metricsConsumer{ diff --git a/receiver/awsxrayreceiver/go.mod b/receiver/awsxrayreceiver/go.mod index 933afec0f29e..cf2be042a5f8 100644 --- a/receiver/awsxrayreceiver/go.mod +++ b/receiver/awsxrayreceiver/go.mod @@ -9,8 +9,8 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray v0.48.0 github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.48.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d go.uber.org/zap v1.21.0 ) @@ -20,27 +20,27 @@ require ( github.com/go-logr/stdr v1.2.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rogpeppe/go-internal v1.6.1 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect + golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) require ( 
github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.48.0 - go.opentelemetry.io/otel/sdk v1.6.1 // indirect + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 + go.opentelemetry.io/otel/sdk v1.6.3 // indirect ) replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/proxy => ./../../internal/aws/proxy @@ -50,3 +50,5 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/x replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/common => ../../internal/common replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal => ../../internal/coreinternal + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/receiver/awsxrayreceiver/go.sum b/receiver/awsxrayreceiver/go.sum index 6d6e63d69ba1..6cde0deac64f 100644 --- a/receiver/awsxrayreceiver/go.sum +++ b/receiver/awsxrayreceiver/go.sum @@ -17,7 +17,7 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= @@ -72,7 +72,6 @@ github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -104,8 +103,8 @@ github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqx github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty 
v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -148,8 +147,6 @@ github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTE github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -164,20 +161,20 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= -go.opentelemetry.io/otel v1.6.1/go.mod h1:blzUabWHkX6LJewxvadmzafgh/wnvBSDBdOuwkAtrWQ= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= -go.opentelemetry.io/otel/sdk v1.6.1/go.mod h1:IVYrddmFZ+eJqu2k38qD3WezFR2pymCzm8tdxyh3R4E= -go.opentelemetry.io/otel/trace v1.6.1/go.mod h1:RkFRM1m0puWIq10oxImnGEduNBzxiN7TXluRBtE+5j0= +go.opentelemetry.io/otel/sdk 
v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= +go.opentelemetry.io/otel/sdk v1.6.3/go.mod h1:A4iWF7HTXa+GWL/AaqESz28VuSBIcZ+0CV+IzJ5NMiQ= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -211,8 +208,8 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -237,8 +234,8 @@ golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/receiver/awsxrayreceiver/internal/translator/addtoattrs.go b/receiver/awsxrayreceiver/internal/translator/addtoattrs.go index 136638de0587..3c9252f177e2 100644 --- a/receiver/awsxrayreceiver/internal/translator/addtoattrs.go +++ b/receiver/awsxrayreceiver/internal/translator/addtoattrs.go @@ -14,23 +14,21 @@ package translator // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsxrayreceiver/internal/translator" -import ( - "go.opentelemetry.io/collector/model/pdata" -) +import "go.opentelemetry.io/collector/pdata/pcommon" -func addBool(val *bool, attrKey string, attrs *pdata.Map) { +func addBool(val *bool, attrKey string, attrs *pcommon.Map) { if val != nil { attrs.UpsertBool(attrKey, *val) } } -func addString(val *string, attrKey string, attrs *pdata.Map) { +func addString(val *string, attrKey string, attrs *pcommon.Map) { if val != nil { attrs.UpsertString(attrKey, *val) } } -func 
addInt64(val *int64, attrKey string, attrs *pdata.Map) { +func addInt64(val *int64, attrKey string, attrs *pcommon.Map) { if val != nil { attrs.UpsertInt(attrKey, *val) } diff --git a/receiver/awsxrayreceiver/internal/translator/annotations.go b/receiver/awsxrayreceiver/internal/translator/annotations.go index c29ae7f5c4d3..01c1bf6321c1 100644 --- a/receiver/awsxrayreceiver/internal/translator/annotations.go +++ b/receiver/awsxrayreceiver/internal/translator/annotations.go @@ -14,11 +14,9 @@ package translator // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsxrayreceiver/internal/translator" -import ( - "go.opentelemetry.io/collector/model/pdata" -) +import "go.opentelemetry.io/collector/pdata/pcommon" -func addAnnotations(annos map[string]interface{}, attrs *pdata.Map) { +func addAnnotations(annos map[string]interface{}, attrs *pcommon.Map) { for k, v := range annos { switch t := v.(type) { case int: diff --git a/receiver/awsxrayreceiver/internal/translator/annotations_test.go b/receiver/awsxrayreceiver/internal/translator/annotations_test.go index b8f0f2431396..16f13fc5cff1 100644 --- a/receiver/awsxrayreceiver/internal/translator/annotations_test.go +++ b/receiver/awsxrayreceiver/internal/translator/annotations_test.go @@ -18,7 +18,7 @@ import ( "testing" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" ) func TestAddAnnotations(t *testing.T) { @@ -30,11 +30,11 @@ func TestAddAnnotations(t *testing.T) { input["float32"] = float32(4.5) input["float64"] = 5.5 - attrMap := pdata.NewMap() + attrMap := pcommon.NewMap() attrMap.EnsureCapacity(initAttrCapacity) addAnnotations(input, &attrMap) - expectedAttrMap := pdata.NewMapFromRaw( + expectedAttrMap := pcommon.NewMapFromRaw( map[string]interface{}{ "int": 0, "int32": int32(1), diff --git a/receiver/awsxrayreceiver/internal/translator/aws.go b/receiver/awsxrayreceiver/internal/translator/aws.go index 4cfbcc2a9891..226648430fbc 100644 --- a/receiver/awsxrayreceiver/internal/translator/aws.go +++ b/receiver/awsxrayreceiver/internal/translator/aws.go @@ -17,13 +17,13 @@ package translator // import "github.com/open-telemetry/opentelemetry-collector- import ( "strconv" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" awsxray "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray" ) -func addAWSToResource(aws *awsxray.AWSData, attrs *pdata.Map) { +func addAWSToResource(aws *awsxray.AWSData, attrs *pcommon.Map) { if aws == nil { // https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/c615d2db351929b99e46f7b427f39c12afe15b54/exporter/awsxrayexporter/translator/aws.go#L121 // this implies that the current segment being processed is not generated @@ -66,7 +66,7 @@ func addAWSToResource(aws *awsxray.AWSData, attrs *pdata.Map) { } } -func addAWSToSpan(aws *awsxray.AWSData, attrs *pdata.Map) { +func addAWSToSpan(aws *awsxray.AWSData, attrs *pcommon.Map) { if aws != nil { addString(aws.AccountID, awsxray.AWSAccountAttribute, attrs) addString(aws.Operation, awsxray.AWSOperationAttribute, attrs) diff --git a/receiver/awsxrayreceiver/internal/translator/cause.go b/receiver/awsxrayreceiver/internal/translator/cause.go index cfa7d1d855ae..fc94fc7638a4 100644 --- a/receiver/awsxrayreceiver/internal/translator/cause.go +++ 
b/receiver/awsxrayreceiver/internal/translator/cause.go @@ -18,8 +18,8 @@ import ( "strconv" "strings" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/ptrace" awsxray "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray" ) @@ -28,7 +28,7 @@ import ( // TODO: Remove this when collector defines this semantic convention. const ExceptionEventName = "exception" -func addCause(seg *awsxray.Segment, span *pdata.Span) { +func addCause(seg *awsxray.Segment, span *ptrace.Span) { if seg.Cause == nil { return } @@ -41,9 +41,9 @@ func addCause(seg *awsxray.Segment, span *pdata.Span) { // temporarily setting the status to otlptrace.Status_UnknownError. This will be // updated to a more specific error in the `segToSpans()` in translator.go once // we traverse through all the subsegments. - if span.Status().Code() == pdata.StatusCodeUnset { + if span.Status().Code() == ptrace.StatusCodeUnset { // StatusCodeUnset is the default value for the span.Status(). - span.Status().SetCode(pdata.StatusCodeError) + span.Status().SetCode(ptrace.StatusCodeError) } switch seg.Cause.Type { diff --git a/receiver/awsxrayreceiver/internal/translator/http.go b/receiver/awsxrayreceiver/internal/translator/http.go index d5e66b50a9f9..07515df59d19 100644 --- a/receiver/awsxrayreceiver/internal/translator/http.go +++ b/receiver/awsxrayreceiver/internal/translator/http.go @@ -15,14 +15,14 @@ package translator // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awsxrayreceiver/internal/translator" import ( - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/ptrace" awsxray "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/tracetranslator" ) -func addHTTP(seg *awsxray.Segment, span *pdata.Span) { +func addHTTP(seg *awsxray.Segment, span *ptrace.Span) { if seg.HTTP == nil { return } diff --git a/receiver/awsxrayreceiver/internal/translator/metadata.go b/receiver/awsxrayreceiver/internal/translator/metadata.go index c8358c7a41f0..3daec5ab1a20 100644 --- a/receiver/awsxrayreceiver/internal/translator/metadata.go +++ b/receiver/awsxrayreceiver/internal/translator/metadata.go @@ -17,12 +17,12 @@ package translator // import "github.com/open-telemetry/opentelemetry-collector- import ( "encoding/json" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" awsxray "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray" ) -func addMetadata(meta map[string]map[string]interface{}, attrs *pdata.Map) error { +func addMetadata(meta map[string]map[string]interface{}, attrs *pcommon.Map) error { for k, v := range meta { val, err := json.Marshal(v) if err != nil { diff --git a/receiver/awsxrayreceiver/internal/translator/name.go b/receiver/awsxrayreceiver/internal/translator/name.go index c3b317d5cded..533e12c9bc7d 100644 --- a/receiver/awsxrayreceiver/internal/translator/name.go +++ b/receiver/awsxrayreceiver/internal/translator/name.go @@ -17,7 +17,7 @@ package translator // import "github.com/open-telemetry/opentelemetry-collector- import ( "fmt" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/ptrace" awsxray 
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray" ) @@ -27,7 +27,7 @@ const ( validRemoteNamespace = "remote" ) -func addNameAndNamespace(seg *awsxray.Segment, span *pdata.Span) error { +func addNameAndNamespace(seg *awsxray.Segment, span *ptrace.Span) error { // https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/c615d2db351929b99e46f7b427f39c12afe15b54/exporter/awsxrayexporter/translator/segment.go#L160 span.SetName(*seg.Name) @@ -37,12 +37,12 @@ func addNameAndNamespace(seg *awsxray.Segment, span *pdata.Span) error { // The `ClientIP` is not nil, it implies that this segment is generated // by a server serving an incoming request - span.SetKind(pdata.SpanKindServer) + span.SetKind(ptrace.SpanKindServer) } if seg.Namespace == nil { - if span.Kind() == pdata.SpanKindUnspecified { - span.SetKind(pdata.SpanKindInternal) + if span.Kind() == ptrace.SpanKindUnspecified { + span.SetKind(ptrace.SpanKindInternal) } return nil } @@ -52,7 +52,7 @@ func addNameAndNamespace(seg *awsxray.Segment, span *pdata.Span) error { attrs := span.Attributes() // https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/c615d2db351929b99e46f7b427f39c12afe15b54/exporter/awsxrayexporter/translator/segment.go#L163 // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#spankind - span.SetKind(pdata.SpanKindClient) + span.SetKind(ptrace.SpanKindClient) switch *seg.Namespace { case validAWSNamespace: // https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/c615d2db351929b99e46f7b427f39c12afe15b54/exporter/awsxrayexporter/translator/segment.go#L116 diff --git a/receiver/awsxrayreceiver/internal/translator/sdk.go b/receiver/awsxrayreceiver/internal/translator/sdk.go index 72a809d65e12..90a354f5d858 100644 --- a/receiver/awsxrayreceiver/internal/translator/sdk.go +++ b/receiver/awsxrayreceiver/internal/translator/sdk.go @@ -17,13 +17,13 @@ package translator // import "github.com/open-telemetry/opentelemetry-collector- import ( "strings" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" awsxray "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray" ) -func addSdkToResource(seg *awsxray.Segment, attrs *pdata.Map) { +func addSdkToResource(seg *awsxray.Segment, attrs *pcommon.Map) { if seg.AWS != nil && seg.AWS.XRay != nil { xr := seg.AWS.XRay addString(xr.SDKVersion, conventions.AttributeTelemetrySDKVersion, attrs) diff --git a/receiver/awsxrayreceiver/internal/translator/sql.go b/receiver/awsxrayreceiver/internal/translator/sql.go index 62d5b52dd101..db9c68f23201 100644 --- a/receiver/awsxrayreceiver/internal/translator/sql.go +++ b/receiver/awsxrayreceiver/internal/translator/sql.go @@ -18,13 +18,13 @@ import ( "fmt" "regexp" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" awsxray "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray" ) -func addSQLToSpan(sql *awsxray.SQLData, attrs *pdata.Map) error { +func addSQLToSpan(sql *awsxray.SQLData, attrs *pcommon.Map) error { if sql == nil { return nil } diff --git a/receiver/awsxrayreceiver/internal/translator/time.go b/receiver/awsxrayreceiver/internal/translator/time.go index 2d8e906b4b1a..af975a9e4ee5 100644 --- 
a/receiver/awsxrayreceiver/internal/translator/time.go +++ b/receiver/awsxrayreceiver/internal/translator/time.go @@ -17,19 +17,20 @@ package translator // import "github.com/open-telemetry/opentelemetry-collector- import ( "time" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" ) -func addStartTime(startTime *float64, span *pdata.Span) { +func addStartTime(startTime *float64, span *ptrace.Span) { span.SetStartTimestamp(floatSecToNanoEpoch(startTime)) } -func addEndTime(endTime *float64, span *pdata.Span) { +func addEndTime(endTime *float64, span *ptrace.Span) { if endTime != nil { span.SetEndTimestamp(floatSecToNanoEpoch(endTime)) } } -func floatSecToNanoEpoch(epochSec *float64) pdata.Timestamp { - return pdata.Timestamp((*epochSec) * float64(time.Second)) +func floatSecToNanoEpoch(epochSec *float64) pcommon.Timestamp { + return pcommon.Timestamp((*epochSec) * float64(time.Second)) } diff --git a/receiver/awsxrayreceiver/internal/translator/translator.go b/receiver/awsxrayreceiver/internal/translator/translator.go index 838a3f2a2c22..e4f5f5ee3fc5 100644 --- a/receiver/awsxrayreceiver/internal/translator/translator.go +++ b/receiver/awsxrayreceiver/internal/translator/translator.go @@ -19,8 +19,9 @@ import ( "encoding/json" "errors" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" awsxray "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray" ) @@ -34,7 +35,7 @@ const ( // `toPdata` in this receiver to a common package later // ToTraces converts X-Ray segment (and its subsegments) to an OT ResourceSpans. -func ToTraces(rawSeg []byte) (*pdata.Traces, int, error) { +func ToTraces(rawSeg []byte) (*ptrace.Traces, int, error) { var seg awsxray.Segment err := json.Unmarshal(rawSeg, &seg) if err != nil { @@ -49,9 +50,9 @@ func ToTraces(rawSeg []byte) (*pdata.Traces, int, error) { return nil, count, err } - traceData := pdata.NewTraces() + traceData := ptrace.NewTraces() rspanSlice := traceData.ResourceSpans() - // ## allocate a new pdata.ResourceSpans for the segment document + // ## allocate a new ptrace.ResourceSpans for the segment document // (potentially with embedded subsegments) rspan := rspanSlice.AppendEmpty() @@ -82,7 +83,7 @@ func ToTraces(rawSeg []byte) (*pdata.Traces, int, error) { func segToSpans(seg awsxray.Segment, traceID, parentID *string, - spans *pdata.SpanSlice) (*pdata.Span, error) { + spans *ptrace.SpanSlice) (*ptrace.Span, error) { span := spans.AppendEmpty() @@ -91,7 +92,7 @@ func segToSpans(seg awsxray.Segment, return nil, err } - var populatedChildSpan *pdata.Span + var populatedChildSpan *ptrace.Span for _, s := range seg.Subsegments { populatedChildSpan, err = segToSpans(s, traceID, seg.ID, @@ -101,14 +102,14 @@ func segToSpans(seg awsxray.Segment, } if seg.Cause != nil && - populatedChildSpan.Status().Code() != pdata.StatusCodeUnset { + populatedChildSpan.Status().Code() != ptrace.StatusCodeUnset { // if seg.Cause is not nil, then one of the subsegments must contain a // HTTP error code. Also, span.Status().Code() is already // set to `StatusCodeUnknownError` by `addCause()` in // `populateSpan()` above, so here we are just trying to figure out // whether we can get an even more specific error code. 
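
Editor's note on the X-Ray translator hunks above: the migration splits the old `pdata` package so that trace types (spans, span kinds, status codes) now come from `ptrace`, while IDs, timestamps, and attribute maps come from `pcommon`. The sketch below is illustrative only, not code from this PR; the function name and literal values are invented, and it assumes the pinned `go.opentelemetry.io/collector/pdata` pseudo-version used throughout this diff.

```go
package sketch

import (
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/ptrace"
)

// buildErrorSpan shows the post-migration API surface exercised by the
// translator: ptrace owns Traces/Span/kind/status, pcommon owns IDs and
// timestamps.
func buildErrorSpan() ptrace.Traces {
	traces := ptrace.NewTraces()
	span := traces.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty().Spans().AppendEmpty()

	span.SetName("illustrative-segment") // hypothetical name
	span.SetKind(ptrace.SpanKindServer)
	span.SetTraceID(pcommon.NewTraceID([16]byte{0x01})) // placeholder ID bytes
	span.SetSpanID(pcommon.NewSpanID([8]byte{0x01}))
	span.SetStartTimestamp(pcommon.NewTimestampFromTime(time.Now()))
	span.Status().SetCode(ptrace.StatusCodeError)
	return traces
}
```

Because status codes and span kinds travel with the span type into `ptrace`, files that previously imported only `pdata` now import both `ptrace` and `pcommon`, which is the shape of every import hunk above.
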
- if span.Status().Code() == pdata.StatusCodeError { + if span.Status().Code() == ptrace.StatusCodeError { // update the error code to a possibly more specific code span.Status().SetCode(populatedChildSpan.Status().Code()) } @@ -121,7 +122,7 @@ func segToSpans(seg awsxray.Segment, func populateSpan( seg *awsxray.Segment, traceID, parentID *string, - span *pdata.Span) error { + span *ptrace.Span) error { attrs := span.Attributes() attrs.Clear() @@ -169,13 +170,13 @@ func populateSpan( return err } - span.SetTraceID(pdata.NewTraceID(traceIDBytes)) - span.SetSpanID(pdata.NewSpanID(spanIDBytes)) + span.SetTraceID(pcommon.NewTraceID(traceIDBytes)) + span.SetSpanID(pcommon.NewSpanID(spanIDBytes)) if parentIDBytes != [8]byte{} { - span.SetParentSpanID(pdata.NewSpanID(parentIDBytes)) + span.SetParentSpanID(pcommon.NewSpanID(parentIDBytes)) } else { - span.SetKind(pdata.SpanKindServer) + span.SetKind(ptrace.SpanKindServer) } addStartTime(seg.StartTime, span) @@ -199,8 +200,8 @@ func populateSpan( return nil } -func populateResource(seg *awsxray.Segment, rs *pdata.Resource) { - // allocate a new attribute map within the Resource in the pdata.ResourceSpans allocated above +func populateResource(seg *awsxray.Segment, rs *pcommon.Resource) { + // allocate a new attribute map within the Resource in the ptrace.ResourceSpans allocated above attrs := rs.Attributes() attrs.Clear() attrs.EnsureCapacity(initAttrCapacity) diff --git a/receiver/awsxrayreceiver/internal/translator/translator_test.go b/receiver/awsxrayreceiver/internal/translator/translator_test.go index 66c82d508d08..e20d7e6e70b4 100644 --- a/receiver/awsxrayreceiver/internal/translator/translator_test.go +++ b/receiver/awsxrayreceiver/internal/translator/translator_test.go @@ -23,8 +23,9 @@ import ( "time" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" awsxray "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray" ) @@ -36,25 +37,25 @@ type perSpanProperties struct { name string startTimeSec float64 endTimeSec *float64 - spanKind pdata.SpanKind + spanKind ptrace.SpanKind spanStatus spanSt eventsProps []eventProps - attrs pdata.Map + attrs pcommon.Map } type spanSt struct { message string - code pdata.StatusCode + code ptrace.StatusCode } type eventProps struct { name string - attrs pdata.Map + attrs pcommon.Map } func TestTranslation(t *testing.T) { - var defaultServerSpanAttrs = func(seg *awsxray.Segment) pdata.Map { - return pdata.NewMapFromRaw(map[string]interface{}{ + var defaultServerSpanAttrs = func(seg *awsxray.Segment) pcommon.Map { + return pcommon.NewMapFromRaw(map[string]interface{}{ conventions.AttributeHTTPMethod: *seg.HTTP.Request.Method, conventions.AttributeHTTPClientIP: *seg.HTTP.Request.ClientIP, conventions.AttributeHTTPUserAgent: *seg.HTTP.Request.UserAgent, @@ -68,19 +69,19 @@ func TestTranslation(t *testing.T) { testCase string expectedUnmarshallFailure bool samplePath string - expectedResourceAttrs func(seg *awsxray.Segment) pdata.Map + expectedResourceAttrs func(seg *awsxray.Segment) pcommon.Map propsPerSpan func(testCase string, t *testing.T, seg *awsxray.Segment) []perSpanProperties verification func(testCase string, actualSeg *awsxray.Segment, - expectedRs *pdata.ResourceSpans, - actualTraces *pdata.Traces, + expectedRs *ptrace.ResourceSpans, + actualTraces *ptrace.Traces, err error) }{ 
{ testCase: "TranslateInstrumentedServerSegment", samplePath: filepath.Join("../../../../internal/aws/xray", "testdata", "serverSample.txt"), - expectedResourceAttrs: func(seg *awsxray.Segment) pdata.Map { - return pdata.NewMapFromRaw(map[string]interface{}{ + expectedResourceAttrs: func(seg *awsxray.Segment) pcommon.Map { + return pcommon.NewMapFromRaw(map[string]interface{}{ conventions.AttributeCloudProvider: conventions.AttributeCloudProviderAWS, conventions.AttributeTelemetrySDKVersion: *seg.AWS.XRay.SDKVersion, conventions.AttributeTelemetrySDKName: *seg.AWS.XRay.SDK, @@ -98,9 +99,9 @@ func TestTranslation(t *testing.T) { name: *seg.Name, startTimeSec: *seg.StartTime, endTimeSec: seg.EndTime, - spanKind: pdata.SpanKindServer, + spanKind: ptrace.SpanKindServer, spanStatus: spanSt{ - code: pdata.StatusCodeUnset, + code: ptrace.StatusCodeUnset, }, attrs: attrs, } @@ -108,7 +109,7 @@ func TestTranslation(t *testing.T) { }, verification: func(testCase string, _ *awsxray.Segment, - expectedRs *pdata.ResourceSpans, actualTraces *pdata.Traces, err error) { + expectedRs *ptrace.ResourceSpans, actualTraces *ptrace.Traces, err error) { assert.NoError(t, err, testCase+": translation should've succeeded") assert.Equal(t, 1, actualTraces.ResourceSpans().Len(), testCase+": one segment should translate to 1 ResourceSpans") @@ -120,8 +121,8 @@ func TestTranslation(t *testing.T) { { testCase: "TranslateInstrumentedClientSegment", samplePath: filepath.Join("../../../../internal/aws/xray", "testdata", "ddbSample.txt"), - expectedResourceAttrs: func(seg *awsxray.Segment) pdata.Map { - return pdata.NewMapFromRaw(map[string]interface{}{ + expectedResourceAttrs: func(seg *awsxray.Segment) pcommon.Map { + return pcommon.NewMapFromRaw(map[string]interface{}{ conventions.AttributeCloudProvider: conventions.AttributeCloudProviderAWS, conventions.AttributeTelemetrySDKVersion: *seg.AWS.XRay.SDKVersion, conventions.AttributeTelemetrySDKName: *seg.AWS.XRay.SDK, @@ -129,7 +130,7 @@ func TestTranslation(t *testing.T) { }) }, propsPerSpan: func(testCase string, t *testing.T, seg *awsxray.Segment) []perSpanProperties { - rootSpanAttrs := pdata.NewMap() + rootSpanAttrs := pcommon.NewMap() rootSpanAttrs.UpsertString(conventions.AttributeEnduserID, *seg.User) rootSpanEvts := initExceptionEvents(seg) assert.Len(t, rootSpanEvts, 1, testCase+": rootSpanEvts has incorrect size") @@ -139,9 +140,9 @@ func TestTranslation(t *testing.T) { name: *seg.Name, startTimeSec: *seg.StartTime, endTimeSec: seg.EndTime, - spanKind: pdata.SpanKindServer, + spanKind: ptrace.SpanKindServer, spanStatus: spanSt{ - code: pdata.StatusCodeError, + code: ptrace.StatusCodeError, }, eventsProps: rootSpanEvts, attrs: rootSpanAttrs, @@ -149,7 +150,7 @@ func TestTranslation(t *testing.T) { // this is the subsegment with ID that starts with 7df6 subseg7df6 := seg.Subsegments[0] - childSpan7df6Attrs := pdata.NewMap() + childSpan7df6Attrs := pcommon.NewMap() for k, v := range subseg7df6.Annotations { childSpan7df6Attrs.UpsertString(k, v.(string)) } @@ -168,16 +169,16 @@ func TestTranslation(t *testing.T) { name: *subseg7df6.Name, startTimeSec: *subseg7df6.StartTime, endTimeSec: subseg7df6.EndTime, - spanKind: pdata.SpanKindInternal, + spanKind: ptrace.SpanKindInternal, spanStatus: spanSt{ - code: pdata.StatusCodeError, + code: ptrace.StatusCodeError, }, eventsProps: childSpan7df6Evts, attrs: childSpan7df6Attrs, } subseg7318 := seg.Subsegments[0].Subsegments[0] - childSpan7318Attrs := pdata.NewMapFromRaw(map[string]interface{}{ + childSpan7318Attrs := 
pcommon.NewMapFromRaw(map[string]interface{}{ awsxray.AWSServiceAttribute: *subseg7318.Name, conventions.AttributeHTTPResponseContentLength: int64(subseg7318.HTTP.Response.ContentLength.(float64)), conventions.AttributeHTTPStatusCode: *subseg7318.HTTP.Response.Status, @@ -195,9 +196,9 @@ func TestTranslation(t *testing.T) { name: *subseg7318.Name, startTimeSec: *subseg7318.StartTime, endTimeSec: subseg7318.EndTime, - spanKind: pdata.SpanKindClient, + spanKind: ptrace.SpanKindClient, spanStatus: spanSt{ - code: pdata.StatusCodeUnset, + code: ptrace.StatusCodeUnset, }, eventsProps: nil, attrs: childSpan7318Attrs, @@ -211,12 +212,12 @@ func TestTranslation(t *testing.T) { name: *subseg0239.Name, startTimeSec: *subseg0239.StartTime, endTimeSec: subseg0239.EndTime, - spanKind: pdata.SpanKindInternal, + spanKind: ptrace.SpanKindInternal, spanStatus: spanSt{ - code: pdata.StatusCodeUnset, + code: ptrace.StatusCodeUnset, }, eventsProps: nil, - attrs: pdata.NewMap(), + attrs: pcommon.NewMap(), } subseg23cf := seg.Subsegments[0].Subsegments[0].Subsegments[1] @@ -227,16 +228,16 @@ func TestTranslation(t *testing.T) { name: *subseg23cf.Name, startTimeSec: *subseg23cf.StartTime, endTimeSec: subseg23cf.EndTime, - spanKind: pdata.SpanKindInternal, + spanKind: ptrace.SpanKindInternal, spanStatus: spanSt{ - code: pdata.StatusCodeUnset, + code: ptrace.StatusCodeUnset, }, eventsProps: nil, - attrs: pdata.NewMap(), + attrs: pcommon.NewMap(), } subseg417b := seg.Subsegments[0].Subsegments[0].Subsegments[1].Subsegments[0] - childSpan417bAttrs := pdata.NewMap() + childSpan417bAttrs := pcommon.NewMap() for k, v := range subseg417b.Metadata { m, err := json.Marshal(v) assert.NoError(t, err, "metadata marshaling failed") @@ -249,16 +250,16 @@ func TestTranslation(t *testing.T) { name: *subseg417b.Name, startTimeSec: *subseg417b.StartTime, endTimeSec: subseg417b.EndTime, - spanKind: pdata.SpanKindInternal, + spanKind: ptrace.SpanKindInternal, spanStatus: spanSt{ - code: pdata.StatusCodeUnset, + code: ptrace.StatusCodeUnset, }, eventsProps: nil, attrs: childSpan417bAttrs, } subseg0cab := seg.Subsegments[0].Subsegments[0].Subsegments[1].Subsegments[0].Subsegments[0] - childSpan0cabAttrs := pdata.NewMap() + childSpan0cabAttrs := pcommon.NewMap() for k, v := range subseg0cab.Metadata { m, err := json.Marshal(v) assert.NoError(t, err, "metadata marshaling failed") @@ -271,16 +272,16 @@ func TestTranslation(t *testing.T) { name: *subseg0cab.Name, startTimeSec: *subseg0cab.StartTime, endTimeSec: subseg0cab.EndTime, - spanKind: pdata.SpanKindInternal, + spanKind: ptrace.SpanKindInternal, spanStatus: spanSt{ - code: pdata.StatusCodeUnset, + code: ptrace.StatusCodeUnset, }, eventsProps: nil, attrs: childSpan0cabAttrs, } subsegF8db := seg.Subsegments[0].Subsegments[0].Subsegments[1].Subsegments[0].Subsegments[1] - childSpanF8dbAttrs := pdata.NewMap() + childSpanF8dbAttrs := pcommon.NewMap() for k, v := range subsegF8db.Metadata { m, err := json.Marshal(v) assert.NoError(t, err, "metadata marshaling failed") @@ -293,16 +294,16 @@ func TestTranslation(t *testing.T) { name: *subsegF8db.Name, startTimeSec: *subsegF8db.StartTime, endTimeSec: subsegF8db.EndTime, - spanKind: pdata.SpanKindInternal, + spanKind: ptrace.SpanKindInternal, spanStatus: spanSt{ - code: pdata.StatusCodeUnset, + code: ptrace.StatusCodeUnset, }, eventsProps: nil, attrs: childSpanF8dbAttrs, } subsegE2de := seg.Subsegments[0].Subsegments[0].Subsegments[1].Subsegments[0].Subsegments[2] - childSpanE2deAttrs := pdata.NewMap() + childSpanE2deAttrs := 
pcommon.NewMap() for k, v := range subsegE2de.Metadata { m, err := json.Marshal(v) assert.NoError(t, err, "metadata marshaling failed") @@ -315,9 +316,9 @@ func TestTranslation(t *testing.T) { name: *subsegE2de.Name, startTimeSec: *subsegE2de.StartTime, endTimeSec: subsegE2de.EndTime, - spanKind: pdata.SpanKindInternal, + spanKind: ptrace.SpanKindInternal, spanStatus: spanSt{ - code: pdata.StatusCodeUnset, + code: ptrace.StatusCodeUnset, }, eventsProps: nil, attrs: childSpanE2deAttrs, @@ -331,12 +332,12 @@ func TestTranslation(t *testing.T) { name: *subsegA70b.Name, startTimeSec: *subsegA70b.StartTime, endTimeSec: subsegA70b.EndTime, - spanKind: pdata.SpanKindInternal, + spanKind: ptrace.SpanKindInternal, spanStatus: spanSt{ - code: pdata.StatusCodeUnset, + code: ptrace.StatusCodeUnset, }, eventsProps: nil, - attrs: pdata.NewMap(), + attrs: pcommon.NewMap(), } subsegC053 := seg.Subsegments[0].Subsegments[0].Subsegments[1].Subsegments[2] @@ -347,12 +348,12 @@ func TestTranslation(t *testing.T) { name: *subsegC053.Name, startTimeSec: *subsegC053.StartTime, endTimeSec: subsegC053.EndTime, - spanKind: pdata.SpanKindInternal, + spanKind: ptrace.SpanKindInternal, spanStatus: spanSt{ - code: pdata.StatusCodeUnset, + code: ptrace.StatusCodeUnset, }, eventsProps: nil, - attrs: pdata.NewMap(), + attrs: pcommon.NewMap(), } subseg5fca := seg.Subsegments[0].Subsegments[0].Subsegments[2] @@ -363,16 +364,16 @@ func TestTranslation(t *testing.T) { name: *subseg5fca.Name, startTimeSec: *subseg5fca.StartTime, endTimeSec: subseg5fca.EndTime, - spanKind: pdata.SpanKindInternal, + spanKind: ptrace.SpanKindInternal, spanStatus: spanSt{ - code: pdata.StatusCodeUnset, + code: ptrace.StatusCodeUnset, }, eventsProps: nil, - attrs: pdata.NewMap(), + attrs: pcommon.NewMap(), } subseg7163 := seg.Subsegments[0].Subsegments[1] - childSpan7163Attrs := pdata.NewMapFromRaw(map[string]interface{}{ + childSpan7163Attrs := pcommon.NewMapFromRaw(map[string]interface{}{ awsxray.AWSServiceAttribute: *subseg7163.Name, conventions.AttributeHTTPStatusCode: *subseg7163.HTTP.Response.Status, conventions.AttributeHTTPResponseContentLength: int64(subseg7163.HTTP.Response.ContentLength.(float64)), @@ -392,9 +393,9 @@ func TestTranslation(t *testing.T) { name: *subseg7163.Name, startTimeSec: *subseg7163.StartTime, endTimeSec: subseg7163.EndTime, - spanKind: pdata.SpanKindClient, + spanKind: ptrace.SpanKindClient, spanStatus: spanSt{ - code: pdata.StatusCodeError, + code: ptrace.StatusCodeError, }, eventsProps: childSpan7163Evts, attrs: childSpan7163Attrs, @@ -408,12 +409,12 @@ func TestTranslation(t *testing.T) { name: *subseg9da0.Name, startTimeSec: *subseg9da0.StartTime, endTimeSec: subseg9da0.EndTime, - spanKind: pdata.SpanKindInternal, + spanKind: ptrace.SpanKindInternal, spanStatus: spanSt{ - code: pdata.StatusCodeUnset, + code: ptrace.StatusCodeUnset, }, eventsProps: nil, - attrs: pdata.NewMap(), + attrs: pcommon.NewMap(), } subseg56b1 := seg.Subsegments[0].Subsegments[1].Subsegments[1] @@ -426,12 +427,12 @@ func TestTranslation(t *testing.T) { name: *subseg56b1.Name, startTimeSec: *subseg56b1.StartTime, endTimeSec: subseg56b1.EndTime, - spanKind: pdata.SpanKindInternal, + spanKind: ptrace.SpanKindInternal, spanStatus: spanSt{ - code: pdata.StatusCodeError, + code: ptrace.StatusCodeError, }, eventsProps: childSpan56b1Evts, - attrs: pdata.NewMap(), + attrs: pcommon.NewMap(), } subseg6f90 := seg.Subsegments[0].Subsegments[1].Subsegments[1].Subsegments[0] @@ -442,12 +443,12 @@ func TestTranslation(t *testing.T) { name: 
*subseg6f90.Name, startTimeSec: *subseg6f90.StartTime, endTimeSec: subseg6f90.EndTime, - spanKind: pdata.SpanKindInternal, + spanKind: ptrace.SpanKindInternal, spanStatus: spanSt{ - code: pdata.StatusCodeUnset, + code: ptrace.StatusCodeUnset, }, eventsProps: nil, - attrs: pdata.NewMap(), + attrs: pcommon.NewMap(), } subsegAcfa := seg.Subsegments[0].Subsegments[1].Subsegments[1].Subsegments[1] @@ -458,12 +459,12 @@ func TestTranslation(t *testing.T) { name: *subsegAcfa.Name, startTimeSec: *subsegAcfa.StartTime, endTimeSec: subsegAcfa.EndTime, - spanKind: pdata.SpanKindInternal, + spanKind: ptrace.SpanKindInternal, spanStatus: spanSt{ - code: pdata.StatusCodeUnset, + code: ptrace.StatusCodeUnset, }, eventsProps: nil, - attrs: pdata.NewMap(), + attrs: pcommon.NewMap(), } subsegBa8d := seg.Subsegments[0].Subsegments[1].Subsegments[2] @@ -476,12 +477,12 @@ func TestTranslation(t *testing.T) { name: *subsegBa8d.Name, startTimeSec: *subsegBa8d.StartTime, endTimeSec: subsegBa8d.EndTime, - spanKind: pdata.SpanKindInternal, + spanKind: ptrace.SpanKindInternal, spanStatus: spanSt{ - code: pdata.StatusCodeError, + code: ptrace.StatusCodeError, }, eventsProps: childSpanBa8dEvts, - attrs: pdata.NewMap(), + attrs: pcommon.NewMap(), } return []perSpanProperties{rootSpan, @@ -506,7 +507,7 @@ func TestTranslation(t *testing.T) { }, verification: func(testCase string, _ *awsxray.Segment, - expectedRs *pdata.ResourceSpans, actualTraces *pdata.Traces, err error) { + expectedRs *ptrace.ResourceSpans, actualTraces *ptrace.Traces, err error) { assert.NoError(t, err, testCase+": translation should've succeeded") assert.Equal(t, 1, actualTraces.ResourceSpans().Len(), "one segment should translate to 1 ResourceSpans") @@ -518,8 +519,8 @@ func TestTranslation(t *testing.T) { { testCase: "[aws] TranslateMissingAWSFieldSegment", samplePath: filepath.Join("../../../../internal/aws/xray", "testdata", "awsMissingAwsField.txt"), - expectedResourceAttrs: func(seg *awsxray.Segment) pdata.Map { - attrs := pdata.NewMap() + expectedResourceAttrs: func(seg *awsxray.Segment) pcommon.Map { + attrs := pcommon.NewMap() attrs.UpsertString(conventions.AttributeCloudProvider, "unknown") return attrs }, @@ -531,9 +532,9 @@ func TestTranslation(t *testing.T) { name: *seg.Name, startTimeSec: *seg.StartTime, endTimeSec: seg.EndTime, - spanKind: pdata.SpanKindServer, + spanKind: ptrace.SpanKindServer, spanStatus: spanSt{ - code: pdata.StatusCodeUnset, + code: ptrace.StatusCodeUnset, }, attrs: attrs, } @@ -541,7 +542,7 @@ func TestTranslation(t *testing.T) { }, verification: func(testCase string, _ *awsxray.Segment, - expectedRs *pdata.ResourceSpans, actualTraces *pdata.Traces, err error) { + expectedRs *ptrace.ResourceSpans, actualTraces *ptrace.Traces, err error) { assert.NoError(t, err, testCase+": translation should've succeeded") assert.Equal(t, 1, actualTraces.ResourceSpans().Len(), testCase+": one segment should translate to 1 ResourceSpans") @@ -553,8 +554,8 @@ func TestTranslation(t *testing.T) { { testCase: "[aws] TranslateEC2AWSFieldsSegment", samplePath: filepath.Join("../../../../internal/aws/xray", "testdata", "awsValidAwsFields.txt"), - expectedResourceAttrs: func(seg *awsxray.Segment) pdata.Map { - return pdata.NewMapFromRaw(map[string]interface{}{ + expectedResourceAttrs: func(seg *awsxray.Segment) pcommon.Map { + return pcommon.NewMapFromRaw(map[string]interface{}{ conventions.AttributeCloudProvider: conventions.AttributeCloudProviderAWS, conventions.AttributeCloudAccountID: *seg.AWS.AccountID, 
conventions.AttributeCloudAvailabilityZone: *seg.AWS.EC2.AvailabilityZone, @@ -580,9 +581,9 @@ func TestTranslation(t *testing.T) { name: *seg.Name, startTimeSec: *seg.StartTime, endTimeSec: seg.EndTime, - spanKind: pdata.SpanKindServer, + spanKind: ptrace.SpanKindServer, spanStatus: spanSt{ - code: pdata.StatusCodeUnset, + code: ptrace.StatusCodeUnset, }, attrs: attrs, } @@ -590,7 +591,7 @@ func TestTranslation(t *testing.T) { }, verification: func(testCase string, _ *awsxray.Segment, - expectedRs *pdata.ResourceSpans, actualTraces *pdata.Traces, err error) { + expectedRs *ptrace.ResourceSpans, actualTraces *ptrace.Traces, err error) { assert.NoError(t, err, testCase+": translation should've succeeded") assert.Equal(t, 1, actualTraces.ResourceSpans().Len(), testCase+": one segment should translate to 1 ResourceSpans") @@ -602,8 +603,8 @@ func TestTranslation(t *testing.T) { { testCase: "TranslateCauseIsExceptionId", samplePath: filepath.Join("../../../../internal/aws/xray", "testdata", "minCauseIsExceptionId.txt"), - expectedResourceAttrs: func(seg *awsxray.Segment) pdata.Map { - attrs := pdata.NewMap() + expectedResourceAttrs: func(seg *awsxray.Segment) pcommon.Map { + attrs := pcommon.NewMap() attrs.UpsertString(conventions.AttributeCloudProvider, "unknown") return attrs }, @@ -614,18 +615,18 @@ func TestTranslation(t *testing.T) { name: *seg.Name, startTimeSec: *seg.StartTime, endTimeSec: seg.EndTime, - spanKind: pdata.SpanKindServer, + spanKind: ptrace.SpanKindServer, spanStatus: spanSt{ message: *seg.Cause.ExceptionID, - code: pdata.StatusCodeError, + code: ptrace.StatusCodeError, }, - attrs: pdata.NewMap(), + attrs: pcommon.NewMap(), } return []perSpanProperties{res} }, verification: func(testCase string, _ *awsxray.Segment, - expectedRs *pdata.ResourceSpans, actualTraces *pdata.Traces, err error) { + expectedRs *ptrace.ResourceSpans, actualTraces *ptrace.Traces, err error) { assert.NoError(t, err, testCase+": translation should've succeeded") assert.Equal(t, 1, actualTraces.ResourceSpans().Len(), testCase+": one segment should translate to 1 ResourceSpans") @@ -637,15 +638,15 @@ func TestTranslation(t *testing.T) { { testCase: "TranslateInvalidNamespace", samplePath: filepath.Join("../../../../internal/aws/xray", "testdata", "invalidNamespace.txt"), - expectedResourceAttrs: func(seg *awsxray.Segment) pdata.Map { - return pdata.NewMap() + expectedResourceAttrs: func(seg *awsxray.Segment) pcommon.Map { + return pcommon.NewMap() }, propsPerSpan: func(_ string, _ *testing.T, seg *awsxray.Segment) []perSpanProperties { return nil }, verification: func(testCase string, actualSeg *awsxray.Segment, - expectedRs *pdata.ResourceSpans, actualTraces *pdata.Traces, err error) { + expectedRs *ptrace.ResourceSpans, actualTraces *ptrace.Traces, err error) { assert.EqualError(t, err, fmt.Sprintf("unexpected namespace: %s", *actualSeg.Subsegments[0].Subsegments[0].Namespace), @@ -655,13 +656,13 @@ func TestTranslation(t *testing.T) { { testCase: "TranslateIndepSubsegment", samplePath: filepath.Join("../../../../internal/aws/xray", "testdata", "indepSubsegment.txt"), - expectedResourceAttrs: func(seg *awsxray.Segment) pdata.Map { - attrs := pdata.NewMap() + expectedResourceAttrs: func(seg *awsxray.Segment) pcommon.Map { + attrs := pcommon.NewMap() attrs.UpsertString(conventions.AttributeCloudProvider, "unknown") return attrs }, propsPerSpan: func(_ string, _ *testing.T, seg *awsxray.Segment) []perSpanProperties { - attrs := pdata.NewMapFromRaw(map[string]interface{}{ + attrs := 
pcommon.NewMapFromRaw(map[string]interface{}{ conventions.AttributeHTTPMethod: *seg.HTTP.Request.Method, conventions.AttributeHTTPStatusCode: *seg.HTTP.Response.Status, conventions.AttributeHTTPURL: *seg.HTTP.Request.URL, @@ -675,9 +676,9 @@ func TestTranslation(t *testing.T) { name: *seg.Name, startTimeSec: *seg.StartTime, endTimeSec: seg.EndTime, - spanKind: pdata.SpanKindClient, + spanKind: ptrace.SpanKindClient, spanStatus: spanSt{ - code: pdata.StatusCodeUnset, + code: ptrace.StatusCodeUnset, }, attrs: attrs, } @@ -685,7 +686,7 @@ func TestTranslation(t *testing.T) { }, verification: func(testCase string, _ *awsxray.Segment, - expectedRs *pdata.ResourceSpans, actualTraces *pdata.Traces, err error) { + expectedRs *ptrace.ResourceSpans, actualTraces *ptrace.Traces, err error) { assert.NoError(t, err, testCase+": translation should've succeeded") assert.Equal(t, 1, actualTraces.ResourceSpans().Len(), testCase+": one segment should translate to 1 ResourceSpans") @@ -697,13 +698,13 @@ func TestTranslation(t *testing.T) { { testCase: "TranslateIndepSubsegmentForContentLengthString", samplePath: filepath.Join("../../../../internal/aws/xray", "testdata", "indepSubsegmentWithContentLengthString.txt"), - expectedResourceAttrs: func(seg *awsxray.Segment) pdata.Map { - attrs := pdata.NewMap() + expectedResourceAttrs: func(seg *awsxray.Segment) pcommon.Map { + attrs := pcommon.NewMap() attrs.UpsertString(conventions.AttributeCloudProvider, "unknown") return attrs }, propsPerSpan: func(_ string, _ *testing.T, seg *awsxray.Segment) []perSpanProperties { - attrs := pdata.NewMapFromRaw(map[string]interface{}{ + attrs := pcommon.NewMapFromRaw(map[string]interface{}{ conventions.AttributeHTTPMethod: *seg.HTTP.Request.Method, conventions.AttributeHTTPStatusCode: *seg.HTTP.Response.Status, conventions.AttributeHTTPURL: *seg.HTTP.Request.URL, @@ -718,9 +719,9 @@ func TestTranslation(t *testing.T) { name: *seg.Name, startTimeSec: *seg.StartTime, endTimeSec: seg.EndTime, - spanKind: pdata.SpanKindClient, + spanKind: ptrace.SpanKindClient, spanStatus: spanSt{ - code: pdata.StatusCodeUnset, + code: ptrace.StatusCodeUnset, }, attrs: attrs, } @@ -728,7 +729,7 @@ func TestTranslation(t *testing.T) { }, verification: func(testCase string, _ *awsxray.Segment, - expectedRs *pdata.ResourceSpans, actualTraces *pdata.Traces, err error) { + expectedRs *ptrace.ResourceSpans, actualTraces *ptrace.Traces, err error) { assert.NoError(t, err, testCase+": translation should've succeeded") assert.Equal(t, 1, actualTraces.ResourceSpans().Len(), testCase+": one segment should translate to 1 ResourceSpans") @@ -740,13 +741,13 @@ func TestTranslation(t *testing.T) { { testCase: "TranslateSql", samplePath: filepath.Join("../../../../internal/aws/xray", "testdata", "indepSubsegmentWithSql.txt"), - expectedResourceAttrs: func(seg *awsxray.Segment) pdata.Map { - attrs := pdata.NewMap() + expectedResourceAttrs: func(seg *awsxray.Segment) pcommon.Map { + attrs := pcommon.NewMap() attrs.UpsertString(conventions.AttributeCloudProvider, "unknown") return attrs }, propsPerSpan: func(_ string, _ *testing.T, seg *awsxray.Segment) []perSpanProperties { - attrs := pdata.NewMapFromRaw(map[string]interface{}{ + attrs := pcommon.NewMapFromRaw(map[string]interface{}{ conventions.AttributeDBConnectionString: "jdbc:postgresql://aawijb5u25wdoy.cpamxznpdoq8.us-west-2." 
+ "rds.amazonaws.com:5432", conventions.AttributeDBName: "ebdb", @@ -761,9 +762,9 @@ func TestTranslation(t *testing.T) { name: *seg.Name, startTimeSec: *seg.StartTime, endTimeSec: seg.EndTime, - spanKind: pdata.SpanKindClient, + spanKind: ptrace.SpanKindClient, spanStatus: spanSt{ - code: pdata.StatusCodeUnset, + code: ptrace.StatusCodeUnset, }, attrs: attrs, } @@ -771,7 +772,7 @@ func TestTranslation(t *testing.T) { }, verification: func(testCase string, _ *awsxray.Segment, - expectedRs *pdata.ResourceSpans, actualTraces *pdata.Traces, err error) { + expectedRs *ptrace.ResourceSpans, actualTraces *ptrace.Traces, err error) { assert.NoError(t, err, testCase+": translation should've succeeded") assert.Equal(t, 1, actualTraces.ResourceSpans().Len(), testCase+": one segment should translate to 1 ResourceSpans") @@ -783,15 +784,15 @@ func TestTranslation(t *testing.T) { { testCase: "TranslateInvalidSqlUrl", samplePath: filepath.Join("../../../../internal/aws/xray", "testdata", "indepSubsegmentWithInvalidSqlUrl.txt"), - expectedResourceAttrs: func(seg *awsxray.Segment) pdata.Map { - return pdata.NewMap() + expectedResourceAttrs: func(seg *awsxray.Segment) pcommon.Map { + return pcommon.NewMap() }, propsPerSpan: func(_ string, _ *testing.T, seg *awsxray.Segment) []perSpanProperties { return nil }, verification: func(testCase string, actualSeg *awsxray.Segment, - expectedRs *pdata.ResourceSpans, actualTraces *pdata.Traces, err error) { + expectedRs *ptrace.ResourceSpans, actualTraces *ptrace.Traces, err error) { assert.EqualError(t, err, fmt.Sprintf( "failed to parse out the database name in the \"sql.url\" field, rawUrl: %s", @@ -804,15 +805,15 @@ func TestTranslation(t *testing.T) { testCase: "TranslateJsonUnmarshallFailed", expectedUnmarshallFailure: true, samplePath: filepath.Join("../../../../internal/aws/xray", "testdata", "minCauseIsInvalid.txt"), - expectedResourceAttrs: func(seg *awsxray.Segment) pdata.Map { - return pdata.NewMap() + expectedResourceAttrs: func(seg *awsxray.Segment) pcommon.Map { + return pcommon.NewMap() }, propsPerSpan: func(_ string, _ *testing.T, seg *awsxray.Segment) []perSpanProperties { return nil }, verification: func(testCase string, actualSeg *awsxray.Segment, - expectedRs *pdata.ResourceSpans, actualTraces *pdata.Traces, err error) { + expectedRs *ptrace.ResourceSpans, actualTraces *ptrace.Traces, err error) { assert.EqualError(t, err, fmt.Sprintf( "the value assigned to the `cause` field does not appear to be a string: %v", @@ -824,15 +825,15 @@ func TestTranslation(t *testing.T) { { testCase: "TranslateRootSegValidationFailed", samplePath: filepath.Join("../../../../internal/aws/xray", "testdata", "segmentValidationFailed.txt"), - expectedResourceAttrs: func(seg *awsxray.Segment) pdata.Map { - return pdata.NewMap() + expectedResourceAttrs: func(seg *awsxray.Segment) pcommon.Map { + return pcommon.NewMap() }, propsPerSpan: func(_ string, _ *testing.T, seg *awsxray.Segment) []perSpanProperties { return nil }, verification: func(testCase string, actualSeg *awsxray.Segment, - expectedRs *pdata.ResourceSpans, actualTraces *pdata.Traces, err error) { + expectedRs *ptrace.ResourceSpans, actualTraces *ptrace.Traces, err error) { assert.EqualError(t, err, `segment "start_time" can not be nil`, testCase+": translation should've failed") }, @@ -846,7 +847,7 @@ func TestTranslation(t *testing.T) { var ( actualSeg awsxray.Segment - expectedRs *pdata.ResourceSpans + expectedRs *ptrace.ResourceSpans ) if !tc.expectedUnmarshallFailure { err = json.Unmarshal(content, 
&actualSeg) @@ -875,7 +876,7 @@ func TestTranslation(t *testing.T) { func initExceptionEvents(expectedSeg *awsxray.Segment) []eventProps { res := make([]eventProps, 0, len(expectedSeg.Cause.Exceptions)) for _, excp := range expectedSeg.Cause.Exceptions { - attrs := pdata.NewMap() + attrs := pcommon.NewMap() attrs.UpsertString(awsxray.AWSXrayExceptionIDAttribute, *excp.ID) if excp.Message != nil { attrs.UpsertString(conventions.AttributeExceptionMessage, *excp.Message) @@ -913,14 +914,14 @@ func initExceptionEvents(expectedSeg *awsxray.Segment) []eventProps { } func initResourceSpans(expectedSeg *awsxray.Segment, - resourceAttrs pdata.Map, + resourceAttrs pcommon.Map, propsPerSpan []perSpanProperties, -) *pdata.ResourceSpans { +) *ptrace.ResourceSpans { if expectedSeg == nil { return nil } - rs := pdata.NewResourceSpans() + rs := ptrace.NewResourceSpans() if resourceAttrs.Len() > 0 { resourceAttrs.CopyTo(rs.Resource().Attributes()) @@ -939,19 +940,19 @@ func initResourceSpans(expectedSeg *awsxray.Segment, for _, props := range propsPerSpan { sp := ls.Spans().AppendEmpty() spanIDBytes, _ := decodeXRaySpanID(&props.spanID) - sp.SetSpanID(pdata.NewSpanID(spanIDBytes)) + sp.SetSpanID(pcommon.NewSpanID(spanIDBytes)) if props.parentSpanID != nil { parentIDBytes, _ := decodeXRaySpanID(props.parentSpanID) - sp.SetParentSpanID(pdata.NewSpanID(parentIDBytes)) + sp.SetParentSpanID(pcommon.NewSpanID(parentIDBytes)) } sp.SetName(props.name) - sp.SetStartTimestamp(pdata.Timestamp(props.startTimeSec * float64(time.Second))) + sp.SetStartTimestamp(pcommon.Timestamp(props.startTimeSec * float64(time.Second))) if props.endTimeSec != nil { - sp.SetEndTimestamp(pdata.Timestamp(*props.endTimeSec * float64(time.Second))) + sp.SetEndTimestamp(pcommon.Timestamp(*props.endTimeSec * float64(time.Second))) } sp.SetKind(props.spanKind) traceIDBytes, _ := decodeXRayTraceID(&props.traceID) - sp.SetTraceID(pdata.NewTraceID(traceIDBytes)) + sp.SetTraceID(pcommon.NewTraceID(traceIDBytes)) sp.Status().SetMessage(props.spanStatus.message) sp.Status().SetCode(props.spanStatus.code) @@ -985,7 +986,7 @@ func initResourceSpans(expectedSeg *awsxray.Segment, // this function performs the same equality verification, then clears // up all the attribute. 
// The reason for doing so is just to be able to use deep equal via assert.Equal() -func compare2ResourceSpans(t *testing.T, testCase string, exp, act *pdata.ResourceSpans) { +func compare2ResourceSpans(t *testing.T, testCase string, exp, act *ptrace.ResourceSpans) { assert.Equal(t, exp.ScopeSpans().Len(), act.ScopeSpans().Len(), testCase+": ScopeSpans.Len() differ") diff --git a/receiver/carbonreceiver/go.mod b/receiver/carbonreceiver/go.mod index 17b016e1b2ca..8790f92df083 100644 --- a/receiver/carbonreceiver/go.mod +++ b/receiver/carbonreceiver/go.mod @@ -8,7 +8,7 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus v0.48.0 github.com/stretchr/testify v1.7.1 go.opencensus.io v0.23.0 - go.opentelemetry.io/collector v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d go.uber.org/zap v1.21.0 google.golang.org/protobuf v1.28.0 @@ -21,21 +21,21 @@ require ( github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.2 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.48.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spf13/cast v1.4.1 // indirect - go.opentelemetry.io/collector/model v0.48.0 // indirect + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d // indirect + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect - go.opentelemetry.io/otel/sdk v1.6.1 // indirect + go.opentelemetry.io/otel/sdk v1.6.3 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect + golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) @@ -45,3 +45,5 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/common => ../../internal/common replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal => ../../internal/coreinternal + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/receiver/carbonreceiver/go.sum b/receiver/carbonreceiver/go.sum index 999a4cd993dc..6cf6428c8f1b 100644 --- a/receiver/carbonreceiver/go.sum +++ b/receiver/carbonreceiver/go.sum @@ -15,7 +15,7 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 
h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.3.0 h1:t/LhUZLVitR1Ow2YOnduCsavhwFUklBMoGVYUCqmCqk= github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -74,7 +74,6 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -105,8 +104,8 @@ github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqx github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -147,8 +146,6 @@ github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBO github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -163,20 +160,20 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 
h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= -go.opentelemetry.io/otel v1.6.1/go.mod h1:blzUabWHkX6LJewxvadmzafgh/wnvBSDBdOuwkAtrWQ= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= -go.opentelemetry.io/otel/sdk v1.6.1/go.mod h1:IVYrddmFZ+eJqu2k38qD3WezFR2pymCzm8tdxyh3R4E= -go.opentelemetry.io/otel/trace v1.6.1/go.mod h1:RkFRM1m0puWIq10oxImnGEduNBzxiN7TXluRBtE+5j0= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= +go.opentelemetry.io/otel/sdk v1.6.3/go.mod h1:A4iWF7HTXa+GWL/AaqESz28VuSBIcZ+0CV+IzJ5NMiQ= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -210,7 +207,7 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -233,8 +230,8 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys 
v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= diff --git a/receiver/cloudfoundryreceiver/converter.go b/receiver/cloudfoundryreceiver/converter.go index 868f1f3fdb31..c16d7b89a223 100644 --- a/receiver/cloudfoundryreceiver/converter.go +++ b/receiver/cloudfoundryreceiver/converter.go @@ -18,42 +18,43 @@ import ( "time" "code.cloudfoundry.org/go-loggregator/rpc/loggregator_v2" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" ) const ( attributeNamePrefix = "org.cloudfoundry." ) -func convertEnvelopeToMetrics(envelope *loggregator_v2.Envelope, metricSlice pdata.MetricSlice, startTime time.Time) { +func convertEnvelopeToMetrics(envelope *loggregator_v2.Envelope, metricSlice pmetric.MetricSlice, startTime time.Time) { namePrefix := envelope.Tags["origin"] + "." 
switch message := envelope.Message.(type) { case *loggregator_v2.Envelope_Log: case *loggregator_v2.Envelope_Counter: metric := metricSlice.AppendEmpty() - metric.SetDataType(pdata.MetricDataTypeSum) + metric.SetDataType(pmetric.MetricDataTypeSum) metric.SetName(namePrefix + message.Counter.GetName()) dataPoint := metric.Sum().DataPoints().AppendEmpty() dataPoint.SetDoubleVal(float64(message.Counter.GetTotal())) - dataPoint.SetTimestamp(pdata.Timestamp(envelope.GetTimestamp())) - dataPoint.SetStartTimestamp(pdata.NewTimestampFromTime(startTime)) + dataPoint.SetTimestamp(pcommon.Timestamp(envelope.GetTimestamp())) + dataPoint.SetStartTimestamp(pcommon.NewTimestampFromTime(startTime)) copyEnvelopeAttributes(dataPoint.Attributes(), envelope) case *loggregator_v2.Envelope_Gauge: for name, value := range message.Gauge.GetMetrics() { metric := metricSlice.AppendEmpty() - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) metric.SetName(namePrefix + name) dataPoint := metric.Gauge().DataPoints().AppendEmpty() dataPoint.SetDoubleVal(value.Value) - dataPoint.SetTimestamp(pdata.Timestamp(envelope.GetTimestamp())) - dataPoint.SetStartTimestamp(pdata.NewTimestampFromTime(startTime)) + dataPoint.SetTimestamp(pcommon.Timestamp(envelope.GetTimestamp())) + dataPoint.SetStartTimestamp(pcommon.NewTimestampFromTime(startTime)) copyEnvelopeAttributes(dataPoint.Attributes(), envelope) } } } -func copyEnvelopeAttributes(attributes pdata.Map, envelope *loggregator_v2.Envelope) { +func copyEnvelopeAttributes(attributes pcommon.Map, envelope *loggregator_v2.Envelope) { for key, value := range envelope.Tags { attributes.InsertString(attributeNamePrefix+key, value) } diff --git a/receiver/cloudfoundryreceiver/converter_test.go b/receiver/cloudfoundryreceiver/converter_test.go index 871f3fd9adb4..fdd15809f4e1 100644 --- a/receiver/cloudfoundryreceiver/converter_test.go +++ b/receiver/cloudfoundryreceiver/converter_test.go @@ -21,7 +21,8 @@ import ( "code.cloudfoundry.org/go-loggregator/rpc/loggregator_v2" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" ) func TestConvertCountEnvelope(t *testing.T) { @@ -47,7 +48,7 @@ func TestConvertCountEnvelope(t *testing.T) { }, } - metricSlice := pdata.NewMetricSlice() + metricSlice := pmetric.NewMetricSlice() convertEnvelopeToMetrics(&envelope, metricSlice, before) @@ -55,12 +56,12 @@ func TestConvertCountEnvelope(t *testing.T) { metric := metricSlice.At(0) assert.Equal(t, "gorouter.bad_gateways", metric.Name()) - assert.Equal(t, pdata.MetricDataTypeSum, metric.DataType()) + assert.Equal(t, pmetric.MetricDataTypeSum, metric.DataType()) dataPoints := metric.Sum().DataPoints() assert.Equal(t, 1, dataPoints.Len()) dataPoint := dataPoints.At(0) - assert.Equal(t, pdata.NewTimestampFromTime(now), dataPoint.Timestamp()) - assert.Equal(t, pdata.NewTimestampFromTime(before), dataPoint.StartTimestamp()) + assert.Equal(t, pcommon.NewTimestampFromTime(now), dataPoint.Timestamp()) + assert.Equal(t, pcommon.NewTimestampFromTime(before), dataPoint.StartTimestamp()) assert.Equal(t, 10.0, dataPoint.DoubleVal()) assertAttributes(t, dataPoint.Attributes(), map[string]string{ @@ -120,7 +121,7 @@ func TestConvertGaugeEnvelope(t *testing.T) { "org.cloudfoundry.ip": "10.0.4.8", } - metricSlice := pdata.NewMetricSlice() + metricSlice := pmetric.NewMetricSlice() 
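
The same split applies to the Cloud Foundry converter being changed here: the metric model moves to `pmetric`, while timestamps and attribute maps move to `pcommon`. Below is a hedged sketch of the counter branch, not taken from the PR; the function name and the tag key/value are invented for illustration.

```go
package sketch

import (
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"
)

// appendCounter mirrors the Envelope_Counter branch above: a cumulative total
// becomes a Sum data point carrying the envelope timestamp and the stream
// start time.
func appendCounter(slice pmetric.MetricSlice, name string, total float64, envelopeNanos int64, start time.Time) {
	metric := slice.AppendEmpty()
	metric.SetDataType(pmetric.MetricDataTypeSum)
	metric.SetName(name)

	dp := metric.Sum().DataPoints().AppendEmpty()
	dp.SetDoubleVal(total)
	dp.SetTimestamp(pcommon.Timestamp(envelopeNanos)) // envelope timestamp is already nanoseconds
	dp.SetStartTimestamp(pcommon.NewTimestampFromTime(start))
	dp.Attributes().InsertString("org.cloudfoundry.origin", "gorouter") // illustrative tag
}
```

The converter test updated just above exercises exactly this path, expecting `gorouter.bad_gateways` with a value of 10 and the new `pcommon.NewTimestampFromTime` timestamps.
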
convertEnvelopeToMetrics(&envelope, metricSlice, before) @@ -133,26 +134,26 @@ func TestConvertGaugeEnvelope(t *testing.T) { metric := metricSlice.At(memoryMetricPosition) assert.Equal(t, "rep.memory", metric.Name()) - assert.Equal(t, pdata.MetricDataTypeGauge, metric.DataType()) + assert.Equal(t, pmetric.MetricDataTypeGauge, metric.DataType()) assert.Equal(t, 1, metric.Gauge().DataPoints().Len()) dataPoint := metric.Gauge().DataPoints().At(0) - assert.Equal(t, pdata.NewTimestampFromTime(now), dataPoint.Timestamp()) - assert.Equal(t, pdata.NewTimestampFromTime(before), dataPoint.StartTimestamp()) + assert.Equal(t, pcommon.NewTimestampFromTime(now), dataPoint.Timestamp()) + assert.Equal(t, pcommon.NewTimestampFromTime(before), dataPoint.StartTimestamp()) assert.Equal(t, 17046641.0, dataPoint.DoubleVal()) assertAttributes(t, dataPoint.Attributes(), expectedAttributes) metric = metricSlice.At(1 - memoryMetricPosition) assert.Equal(t, "rep.disk", metric.Name()) - assert.Equal(t, pdata.MetricDataTypeGauge, metric.DataType()) + assert.Equal(t, pmetric.MetricDataTypeGauge, metric.DataType()) assert.Equal(t, 1, metric.Gauge().DataPoints().Len()) dataPoint = metric.Gauge().DataPoints().At(0) - assert.Equal(t, pdata.NewTimestampFromTime(now), dataPoint.Timestamp()) - assert.Equal(t, pdata.NewTimestampFromTime(before), dataPoint.StartTimestamp()) + assert.Equal(t, pcommon.NewTimestampFromTime(now), dataPoint.Timestamp()) + assert.Equal(t, pcommon.NewTimestampFromTime(before), dataPoint.StartTimestamp()) assert.Equal(t, 10231808.0, dataPoint.DoubleVal()) assertAttributes(t, dataPoint.Attributes(), expectedAttributes) } -func assertAttributes(t *testing.T, attributes pdata.Map, expected map[string]string) { +func assertAttributes(t *testing.T, attributes pcommon.Map, expected map[string]string) { assert.Equal(t, len(expected), attributes.Len()) for key, expectedValue := range expected { diff --git a/receiver/cloudfoundryreceiver/go.mod b/receiver/cloudfoundryreceiver/go.mod index 32112e7ce272..31f13e3f6481 100644 --- a/receiver/cloudfoundryreceiver/go.mod +++ b/receiver/cloudfoundryreceiver/go.mod @@ -6,8 +6,8 @@ require ( code.cloudfoundry.org/go-loggregator v7.4.0+incompatible github.com/cloudfoundry-incubator/uaago v0.0.0-20190307164349-8136b7bbe76e github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/zap v1.21.0 ) @@ -23,7 +23,7 @@ require ( github.com/golang/protobuf v1.5.2 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/klauspost/compress v1.15.1 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/pretty v0.3.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect @@ -34,7 +34,6 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rs/cors v1.8.2 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect @@ -42,8 +41,8 @@ require ( go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // 
indirect go.uber.org/multierr v1.8.0 // indirect - golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect + golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 // indirect golang.org/x/text v0.3.7 // indirect google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect google.golang.org/grpc v1.45.0 // indirect @@ -52,3 +51,5 @@ require ( gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/receiver/cloudfoundryreceiver/go.sum b/receiver/cloudfoundryreceiver/go.sum index 65033f849369..1140733ed9de 100644 --- a/receiver/cloudfoundryreceiver/go.sum +++ b/receiver/cloudfoundryreceiver/go.sum @@ -24,7 +24,7 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -102,7 +102,6 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -135,8 +134,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod 
h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= @@ -200,8 +199,6 @@ github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -217,10 +214,10 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0/go.mod h1:PFmBsWbldL1kiWZk9+0LBZz2brhByaGsvp6pRICMlPE= @@ -231,7 +228,7 @@ go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1 go.opentelemetry.io/otel/metric v0.28.0/go.mod h1:TrzsfQAmQaB1PDcdhBauLMk7nyyg9hm+GoQq/ekE9Iw= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.0/go.mod h1:qs7BrU5cZ8dXQHBGxHMOxwME/27YH2qEp4/+tZLLwJE= go.opentelemetry.io/otel/trace v1.6.1/go.mod h1:RkFRM1m0puWIq10oxImnGEduNBzxiN7TXluRBtE+5j0= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= @@ -273,8 +270,8 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY 
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -304,8 +301,8 @@ golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= diff --git a/receiver/cloudfoundryreceiver/receiver.go b/receiver/cloudfoundryreceiver/receiver.go index c465e01cdb8a..ebd6066471ab 100644 --- a/receiver/cloudfoundryreceiver/receiver.go +++ b/receiver/cloudfoundryreceiver/receiver.go @@ -25,8 +25,8 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/component/componenterror" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/obsreport" + "go.opentelemetry.io/collector/pdata/pmetric" ) const ( @@ -138,7 +138,7 @@ func (cfr *cloudFoundryReceiver) streamMetrics( break } - metrics := pdata.NewMetrics() + metrics := pmetric.NewMetrics() libraryMetrics := createLibraryMetricsSlice(metrics) for _, envelope := range envelopes { @@ -157,7 +157,7 @@ func (cfr *cloudFoundryReceiver) streamMetrics( } } -func createLibraryMetricsSlice(metrics pdata.Metrics) pdata.MetricSlice { +func createLibraryMetricsSlice(metrics pmetric.Metrics) pmetric.MetricSlice { resourceMetrics := metrics.ResourceMetrics() resourceMetric := resourceMetrics.AppendEmpty() resourceMetric.Resource().Attributes() diff --git a/receiver/collectdreceiver/go.mod b/receiver/collectdreceiver/go.mod index d8665f1c8e2d..b103fc5c0258 100644 --- a/receiver/collectdreceiver/go.mod +++ b/receiver/collectdreceiver/go.mod @@ -8,7 +8,7 
@@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus v0.48.0 github.com/stretchr/testify v1.7.1 go.opencensus.io v0.23.0 - go.opentelemetry.io/collector v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d go.uber.org/zap v1.21.0 google.golang.org/protobuf v1.28.0 ) @@ -19,21 +19,21 @@ require ( github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.2 // indirect github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.48.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spf13/cast v1.4.1 // indirect - go.opentelemetry.io/collector/model v0.48.0 // indirect + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d // indirect + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect - golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect + golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 // indirect golang.org/x/text v0.3.7 // indirect google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect google.golang.org/grpc v1.45.0 // indirect @@ -46,3 +46,5 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/common => ../../internal/common replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal => ../../internal/coreinternal + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/receiver/collectdreceiver/go.sum b/receiver/collectdreceiver/go.sum index 552103ace226..6577af7da9d7 100644 --- a/receiver/collectdreceiver/go.sum +++ b/receiver/collectdreceiver/go.sum @@ -18,7 +18,7 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.3.0 h1:t/LhUZLVitR1Ow2YOnduCsavhwFUklBMoGVYUCqmCqk= github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod 
h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -91,7 +91,6 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -123,8 +122,8 @@ github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqx github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -167,8 +166,6 @@ github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -184,10 +181,12 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d 
h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= @@ -230,8 +229,8 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -255,8 +254,8 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= diff --git a/receiver/couchbasereceiver/go.mod b/receiver/couchbasereceiver/go.mod index f27f54d9dc05..c6584a05fc6f 100644 --- a/receiver/couchbasereceiver/go.mod +++ b/receiver/couchbasereceiver/go.mod @@ -4,13 +4,14 @@ go 1.17 require ( 
github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d go.uber.org/multierr v1.8.0 ) -require go.opentelemetry.io/collector/model v0.48.0 - -require go.uber.org/zap v1.21.0 +require ( + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 + go.uber.org/zap v1.21.0 +) require ( github.com/davecgh/go-spew v1.1.1 // indirect @@ -22,7 +23,7 @@ require ( github.com/golang/protobuf v1.5.2 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/klauspost/compress v1.15.1 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/pretty v0.3.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect @@ -31,15 +32,18 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rs/cors v1.8.2 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect + golang.org/x/text v0.3.7 // indirect google.golang.org/grpc v1.45.0 // indirect google.golang.org/protobuf v1.28.0 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/receiver/couchbasereceiver/go.sum b/receiver/couchbasereceiver/go.sum index 3812d26d40e9..746036bf75b7 100644 --- a/receiver/couchbasereceiver/go.sum +++ b/receiver/couchbasereceiver/go.sum @@ -118,8 +118,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= @@ -167,8 +167,6 @@ github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= 
-github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -184,10 +182,10 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0/go.mod h1:PFmBsWbldL1kiWZk9+0LBZz2brhByaGsvp6pRICMlPE= go.opentelemetry.io/otel v1.6.0/go.mod h1:bfJD2DZVw0LBxghOTlgnlI0CV3hLDu9XF/QKOUXMTQQ= @@ -197,7 +195,7 @@ go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1 go.opentelemetry.io/otel/metric v0.28.0/go.mod h1:TrzsfQAmQaB1PDcdhBauLMk7nyyg9hm+GoQq/ekE9Iw= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.0/go.mod h1:qs7BrU5cZ8dXQHBGxHMOxwME/27YH2qEp4/+tZLLwJE= go.opentelemetry.io/otel/trace v1.6.1/go.mod h1:RkFRM1m0puWIq10oxImnGEduNBzxiN7TXluRBtE+5j0= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= @@ -236,6 +234,8 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod 
h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -260,11 +260,13 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/receiver/couchbasereceiver/internal/metadata/generated_metrics_v2.go b/receiver/couchbasereceiver/internal/metadata/generated_metrics_v2.go index da1647e677eb..5fa4fd307632 100644 --- a/receiver/couchbasereceiver/internal/metadata/generated_metrics_v2.go +++ b/receiver/couchbasereceiver/internal/metadata/generated_metrics_v2.go @@ -5,7 +5,8 @@ package metadata import ( "time" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" ) // MetricSettings provides common settings for a particular metric. @@ -24,17 +25,17 @@ func DefaultMetricsSettings() MetricsSettings { // MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations // required to produce metric representation defined in metadata and user settings. type MetricsBuilder struct { - startTime pdata.Timestamp // start time that will be applied to all recorded data points. - metricsCapacity int // maximum observed number of metrics per resource. - resourceCapacity int // maximum observed number of resource attributes. - metricsBuffer pdata.Metrics // accumulates metrics data before emitting. + startTime pcommon.Timestamp // start time that will be applied to all recorded data points. + metricsCapacity int // maximum observed number of metrics per resource. + resourceCapacity int // maximum observed number of resource attributes. + metricsBuffer pmetric.Metrics // accumulates metrics data before emitting. } // metricBuilderOption applies changes to default metrics builder. type metricBuilderOption func(*MetricsBuilder) // WithStartTime sets startTime on the metrics builder. 
-func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { +func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption { return func(mb *MetricsBuilder) { mb.startTime = startTime } @@ -42,8 +43,8 @@ func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ - startTime: pdata.NewTimestampFromTime(time.Now()), - metricsBuffer: pdata.NewMetrics(), + startTime: pcommon.NewTimestampFromTime(time.Now()), + metricsBuffer: pmetric.NewMetrics(), } for _, op := range options { op(mb) @@ -52,7 +53,7 @@ func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) } // updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity. -func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { +func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) { if mb.metricsCapacity < rm.ScopeMetrics().At(0).Metrics().Len() { mb.metricsCapacity = rm.ScopeMetrics().At(0).Metrics().Len() } @@ -62,14 +63,14 @@ func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { } // ResourceOption applies changes to provided resource. -type ResourceOption func(pdata.Resource) +type ResourceOption func(pcommon.Resource) // EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for // recording another set of data points as part of another resource. This function can be helpful when one scraper // needs to emit metrics from several resources. Otherwise calling this function is not required, // just `Emit` function can be called instead. Resource attributes should be provided as ResourceOption arguments. func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { - rm := pdata.NewResourceMetrics() + rm := pmetric.NewResourceMetrics() rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity) for _, op := range ro { op(rm.Resource()) @@ -86,9 +87,9 @@ func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { // Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for // recording another set of metrics. This function will be responsible for applying all the transformations required to // produce metric representation defined in metadata and user settings, e.g. delta or cumulative. -func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pdata.Metrics { +func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pmetric.Metrics { mb.EmitForResource(ro...) - metrics := pdata.NewMetrics() + metrics := pmetric.NewMetrics() mb.metricsBuffer.MoveTo(metrics) return metrics } @@ -96,7 +97,7 @@ func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pdata.Metrics { // Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted, // and metrics builder should update its startTime and reset it's internal state accordingly. 
func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { - mb.startTime = pdata.NewTimestampFromTime(time.Now()) + mb.startTime = pcommon.NewTimestampFromTime(time.Now()) for _, op := range options { op(mb) } diff --git a/receiver/couchdbreceiver/go.mod b/receiver/couchdbreceiver/go.mod index d5a561044f73..90d1a98f85d6 100644 --- a/receiver/couchdbreceiver/go.mod +++ b/receiver/couchdbreceiver/go.mod @@ -3,7 +3,7 @@ module github.com/open-telemetry/opentelemetry-collector-contrib/receiver/couchd go 1.17 require ( - go.opentelemetry.io/collector v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d go.uber.org/multierr v1.8.0 ) @@ -11,7 +11,7 @@ require github.com/stretchr/testify v1.7.1 require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/scrapertest v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/zap v1.21.0 ) @@ -25,7 +25,7 @@ require ( github.com/golang/protobuf v1.5.2 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/klauspost/compress v1.15.1 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/text v0.2.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect @@ -35,7 +35,6 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rogpeppe/go-internal v1.6.1 // indirect github.com/rs/cors v1.8.2 // indirect - github.com/spf13/cast v1.4.1 // indirect github.com/stretchr/objx v0.1.1 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 // indirect @@ -43,10 +42,8 @@ require ( go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect - golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect golang.org/x/text v0.3.7 // indirect - google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect google.golang.org/grpc v1.45.0 // indirect google.golang.org/protobuf v1.28.0 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect @@ -54,3 +51,5 @@ require ( ) replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/scrapertest => ../../internal/scrapertest + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/receiver/couchdbreceiver/go.sum b/receiver/couchdbreceiver/go.sum index c11fd57ca165..d3e22cca8022 100644 --- a/receiver/couchdbreceiver/go.sum +++ b/receiver/couchdbreceiver/go.sum @@ -1,7 +1,6 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod 
h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= @@ -19,13 +18,11 @@ github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZx github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= @@ -37,7 +34,6 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= @@ -122,8 +118,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod 
h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= @@ -170,9 +166,6 @@ github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -190,10 +183,10 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0/go.mod h1:PFmBsWbldL1kiWZk9+0LBZz2brhByaGsvp6pRICMlPE= go.opentelemetry.io/otel v1.6.0/go.mod h1:bfJD2DZVw0LBxghOTlgnlI0CV3hLDu9XF/QKOUXMTQQ= @@ -203,7 +196,7 @@ go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1 go.opentelemetry.io/otel/metric v0.28.0/go.mod h1:TrzsfQAmQaB1PDcdhBauLMk7nyyg9hm+GoQq/ekE9Iw= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.0/go.mod h1:qs7BrU5cZ8dXQHBGxHMOxwME/27YH2qEp4/+tZLLwJE= go.opentelemetry.io/otel/trace v1.6.1/go.mod h1:RkFRM1m0puWIq10oxImnGEduNBzxiN7TXluRBtE+5j0= go.opentelemetry.io/otel/trace v1.6.3 
h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= @@ -242,8 +235,8 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -268,13 +261,11 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -299,8 +290,6 @@ google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRn google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc 
v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= @@ -310,7 +299,6 @@ google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8 google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -324,7 +312,6 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= diff --git a/receiver/couchdbreceiver/internal/metadata/generated_metrics_v2.go b/receiver/couchdbreceiver/internal/metadata/generated_metrics_v2.go index 8fa829b593ef..0243aaa217fc 100644 --- a/receiver/couchdbreceiver/internal/metadata/generated_metrics_v2.go +++ b/receiver/couchdbreceiver/internal/metadata/generated_metrics_v2.go @@ -5,7 +5,8 @@ package metadata import ( "time" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" ) // MetricSettings provides common settings for a particular metric. @@ -55,7 +56,7 @@ func DefaultMetricsSettings() MetricsSettings { } type metricCouchdbAverageRequestTime struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -65,10 +66,10 @@ func (m *metricCouchdbAverageRequestTime) init() { m.data.SetName("couchdb.average_request_time") m.data.SetDescription("The average duration of a served request.") m.data.SetUnit("ms") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) } -func (m *metricCouchdbAverageRequestTime) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64) { +func (m *metricCouchdbAverageRequestTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { if !m.settings.Enabled { return } @@ -86,7 +87,7 @@ func (m *metricCouchdbAverageRequestTime) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricCouchdbAverageRequestTime) emit(metrics pdata.MetricSlice) { +func (m *metricCouchdbAverageRequestTime) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -97,14 +98,14 @@ func (m *metricCouchdbAverageRequestTime) emit(metrics pdata.MetricSlice) { func newMetricCouchdbAverageRequestTime(settings MetricSettings) metricCouchdbAverageRequestTime { m := metricCouchdbAverageRequestTime{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricCouchdbDatabaseOpen struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -114,12 +115,12 @@ func (m *metricCouchdbDatabaseOpen) init() { m.data.SetName("couchdb.database.open") m.data.SetDescription("The number of open databases.") m.data.SetUnit("{databases}") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) } -func (m *metricCouchdbDatabaseOpen) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricCouchdbDatabaseOpen) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -137,7 +138,7 @@ func (m *metricCouchdbDatabaseOpen) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricCouchdbDatabaseOpen) emit(metrics pdata.MetricSlice) { +func (m *metricCouchdbDatabaseOpen) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -148,14 +149,14 @@ func (m *metricCouchdbDatabaseOpen) emit(metrics pdata.MetricSlice) { func newMetricCouchdbDatabaseOpen(settings MetricSettings) metricCouchdbDatabaseOpen { m := metricCouchdbDatabaseOpen{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricCouchdbDatabaseOperations struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -165,13 +166,13 @@ func (m *metricCouchdbDatabaseOperations) init() { m.data.SetName("couchdb.database.operations") m.data.SetDescription("The number of database operations.") m.data.SetUnit("{operations}") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricCouchdbDatabaseOperations) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, operationAttributeValue string) { +func (m *metricCouchdbDatabaseOperations) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, operationAttributeValue string) { if !m.settings.Enabled { return } @@ -179,7 +180,7 @@ func (m *metricCouchdbDatabaseOperations) recordDataPoint(start pdata.Timestamp, dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.Operation, pdata.NewValueString(operationAttributeValue)) + dp.Attributes().Insert(A.Operation, pcommon.NewValueString(operationAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -190,7 +191,7 @@ func (m *metricCouchdbDatabaseOperations) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricCouchdbDatabaseOperations) emit(metrics pdata.MetricSlice) { +func (m *metricCouchdbDatabaseOperations) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -201,14 +202,14 @@ func (m *metricCouchdbDatabaseOperations) emit(metrics pdata.MetricSlice) { func newMetricCouchdbDatabaseOperations(settings MetricSettings) metricCouchdbDatabaseOperations { m := metricCouchdbDatabaseOperations{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricCouchdbFileDescriptorOpen struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -218,12 +219,12 @@ func (m *metricCouchdbFileDescriptorOpen) init() { m.data.SetName("couchdb.file_descriptor.open") m.data.SetDescription("The number of open file descriptors.") m.data.SetUnit("{files}") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) } -func (m *metricCouchdbFileDescriptorOpen) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricCouchdbFileDescriptorOpen) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -241,7 +242,7 @@ func (m *metricCouchdbFileDescriptorOpen) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricCouchdbFileDescriptorOpen) emit(metrics pdata.MetricSlice) { +func (m *metricCouchdbFileDescriptorOpen) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -252,14 +253,14 @@ func (m *metricCouchdbFileDescriptorOpen) emit(metrics pdata.MetricSlice) { func newMetricCouchdbFileDescriptorOpen(settings MetricSettings) metricCouchdbFileDescriptorOpen { m := metricCouchdbFileDescriptorOpen{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricCouchdbHttpdBulkRequests struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -269,12 +270,12 @@ func (m *metricCouchdbHttpdBulkRequests) init() { m.data.SetName("couchdb.httpd.bulk_requests") m.data.SetDescription("The number of bulk requests.") m.data.SetUnit("{requests}") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) } -func (m *metricCouchdbHttpdBulkRequests) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricCouchdbHttpdBulkRequests) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -292,7 +293,7 @@ func (m *metricCouchdbHttpdBulkRequests) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricCouchdbHttpdBulkRequests) emit(metrics pdata.MetricSlice) { +func (m *metricCouchdbHttpdBulkRequests) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -303,14 +304,14 @@ func (m *metricCouchdbHttpdBulkRequests) emit(metrics pdata.MetricSlice) { func newMetricCouchdbHttpdBulkRequests(settings MetricSettings) metricCouchdbHttpdBulkRequests { m := metricCouchdbHttpdBulkRequests{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricCouchdbHttpdRequests struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -320,13 +321,13 @@ func (m *metricCouchdbHttpdRequests) init() { m.data.SetName("couchdb.httpd.requests") m.data.SetDescription("The number of HTTP requests by method.") m.data.SetUnit("{requests}") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricCouchdbHttpdRequests) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, httpMethodAttributeValue string) { +func (m *metricCouchdbHttpdRequests) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, httpMethodAttributeValue string) { if !m.settings.Enabled { return } @@ -334,7 +335,7 @@ func (m *metricCouchdbHttpdRequests) recordDataPoint(start pdata.Timestamp, ts p dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.HTTPMethod, pdata.NewValueString(httpMethodAttributeValue)) + dp.Attributes().Insert(A.HTTPMethod, pcommon.NewValueString(httpMethodAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -345,7 +346,7 @@ func (m *metricCouchdbHttpdRequests) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricCouchdbHttpdRequests) emit(metrics pdata.MetricSlice) { +func (m *metricCouchdbHttpdRequests) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -356,14 +357,14 @@ func (m *metricCouchdbHttpdRequests) emit(metrics pdata.MetricSlice) { func newMetricCouchdbHttpdRequests(settings MetricSettings) metricCouchdbHttpdRequests { m := metricCouchdbHttpdRequests{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricCouchdbHttpdResponses struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -373,13 +374,13 @@ func (m *metricCouchdbHttpdResponses) init() { m.data.SetName("couchdb.httpd.responses") m.data.SetDescription("The number of each HTTP status code.") m.data.SetUnit("{responses}") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricCouchdbHttpdResponses) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, httpStatusCodeAttributeValue string) { +func (m *metricCouchdbHttpdResponses) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, httpStatusCodeAttributeValue string) { if !m.settings.Enabled { return } @@ -387,7 +388,7 @@ func (m *metricCouchdbHttpdResponses) recordDataPoint(start pdata.Timestamp, ts dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.HTTPStatusCode, pdata.NewValueString(httpStatusCodeAttributeValue)) + dp.Attributes().Insert(A.HTTPStatusCode, pcommon.NewValueString(httpStatusCodeAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -398,7 +399,7 @@ func (m *metricCouchdbHttpdResponses) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricCouchdbHttpdResponses) emit(metrics pdata.MetricSlice) { +func (m *metricCouchdbHttpdResponses) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -409,14 +410,14 @@ func (m *metricCouchdbHttpdResponses) emit(metrics pdata.MetricSlice) { func newMetricCouchdbHttpdResponses(settings MetricSettings) metricCouchdbHttpdResponses { m := metricCouchdbHttpdResponses{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricCouchdbHttpdViews struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -426,13 +427,13 @@ func (m *metricCouchdbHttpdViews) init() { m.data.SetName("couchdb.httpd.views") m.data.SetDescription("The number of views read.") m.data.SetUnit("{views}") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricCouchdbHttpdViews) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, viewAttributeValue string) { +func (m *metricCouchdbHttpdViews) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, viewAttributeValue string) { if !m.settings.Enabled { return } @@ -440,7 +441,7 @@ func (m *metricCouchdbHttpdViews) recordDataPoint(start pdata.Timestamp, ts pdat dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.View, pdata.NewValueString(viewAttributeValue)) + dp.Attributes().Insert(A.View, pcommon.NewValueString(viewAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -451,7 +452,7 @@ func (m *metricCouchdbHttpdViews) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricCouchdbHttpdViews) emit(metrics pdata.MetricSlice) { +func (m *metricCouchdbHttpdViews) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -462,7 +463,7 @@ func (m *metricCouchdbHttpdViews) emit(metrics pdata.MetricSlice) { func newMetricCouchdbHttpdViews(settings MetricSettings) metricCouchdbHttpdViews { m := metricCouchdbHttpdViews{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m @@ -471,10 +472,10 @@ func newMetricCouchdbHttpdViews(settings MetricSettings) metricCouchdbHttpdViews // MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations // required to produce metric representation defined in metadata and user settings. type MetricsBuilder struct { - startTime pdata.Timestamp // start time that will be applied to all recorded data points. - metricsCapacity int // maximum observed number of metrics per resource. - resourceCapacity int // maximum observed number of resource attributes. - metricsBuffer pdata.Metrics // accumulates metrics data before emitting. + startTime pcommon.Timestamp // start time that will be applied to all recorded data points. + metricsCapacity int // maximum observed number of metrics per resource. + resourceCapacity int // maximum observed number of resource attributes. + metricsBuffer pmetric.Metrics // accumulates metrics data before emitting. metricCouchdbAverageRequestTime metricCouchdbAverageRequestTime metricCouchdbDatabaseOpen metricCouchdbDatabaseOpen metricCouchdbDatabaseOperations metricCouchdbDatabaseOperations @@ -489,7 +490,7 @@ type MetricsBuilder struct { type metricBuilderOption func(*MetricsBuilder) // WithStartTime sets startTime on the metrics builder. 
-func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { +func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption { return func(mb *MetricsBuilder) { mb.startTime = startTime } @@ -497,8 +498,8 @@ func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ - startTime: pdata.NewTimestampFromTime(time.Now()), - metricsBuffer: pdata.NewMetrics(), + startTime: pcommon.NewTimestampFromTime(time.Now()), + metricsBuffer: pmetric.NewMetrics(), metricCouchdbAverageRequestTime: newMetricCouchdbAverageRequestTime(settings.CouchdbAverageRequestTime), metricCouchdbDatabaseOpen: newMetricCouchdbDatabaseOpen(settings.CouchdbDatabaseOpen), metricCouchdbDatabaseOperations: newMetricCouchdbDatabaseOperations(settings.CouchdbDatabaseOperations), @@ -515,7 +516,7 @@ func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) } // updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity. -func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { +func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) { if mb.metricsCapacity < rm.ScopeMetrics().At(0).Metrics().Len() { mb.metricsCapacity = rm.ScopeMetrics().At(0).Metrics().Len() } @@ -525,11 +526,11 @@ func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { } // ResourceOption applies changes to provided resource. -type ResourceOption func(pdata.Resource) +type ResourceOption func(pcommon.Resource) // WithCouchdbNodeName sets provided value as "couchdb.node.name" attribute for current resource. func WithCouchdbNodeName(val string) ResourceOption { - return func(r pdata.Resource) { + return func(r pcommon.Resource) { r.Attributes().UpsertString("couchdb.node.name", val) } } @@ -539,7 +540,7 @@ func WithCouchdbNodeName(val string) ResourceOption { // needs to emit metrics from several resources. Otherwise calling this function is not required, // just `Emit` function can be called instead. Resource attributes should be provided as ResourceOption arguments. func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { - rm := pdata.NewResourceMetrics() + rm := pmetric.NewResourceMetrics() rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity) for _, op := range ro { op(rm.Resource()) @@ -564,57 +565,57 @@ func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { // Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for // recording another set of metrics. This function will be responsible for applying all the transformations required to // produce metric representation defined in metadata and user settings, e.g. delta or cumulative. -func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pdata.Metrics { +func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pmetric.Metrics { mb.EmitForResource(ro...) - metrics := pdata.NewMetrics() + metrics := pmetric.NewMetrics() mb.metricsBuffer.MoveTo(metrics) return metrics } // RecordCouchdbAverageRequestTimeDataPoint adds a data point to couchdb.average_request_time metric. 
-func (mb *MetricsBuilder) RecordCouchdbAverageRequestTimeDataPoint(ts pdata.Timestamp, val float64) { +func (mb *MetricsBuilder) RecordCouchdbAverageRequestTimeDataPoint(ts pcommon.Timestamp, val float64) { mb.metricCouchdbAverageRequestTime.recordDataPoint(mb.startTime, ts, val) } // RecordCouchdbDatabaseOpenDataPoint adds a data point to couchdb.database.open metric. -func (mb *MetricsBuilder) RecordCouchdbDatabaseOpenDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordCouchdbDatabaseOpenDataPoint(ts pcommon.Timestamp, val int64) { mb.metricCouchdbDatabaseOpen.recordDataPoint(mb.startTime, ts, val) } // RecordCouchdbDatabaseOperationsDataPoint adds a data point to couchdb.database.operations metric. -func (mb *MetricsBuilder) RecordCouchdbDatabaseOperationsDataPoint(ts pdata.Timestamp, val int64, operationAttributeValue string) { +func (mb *MetricsBuilder) RecordCouchdbDatabaseOperationsDataPoint(ts pcommon.Timestamp, val int64, operationAttributeValue string) { mb.metricCouchdbDatabaseOperations.recordDataPoint(mb.startTime, ts, val, operationAttributeValue) } // RecordCouchdbFileDescriptorOpenDataPoint adds a data point to couchdb.file_descriptor.open metric. -func (mb *MetricsBuilder) RecordCouchdbFileDescriptorOpenDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordCouchdbFileDescriptorOpenDataPoint(ts pcommon.Timestamp, val int64) { mb.metricCouchdbFileDescriptorOpen.recordDataPoint(mb.startTime, ts, val) } // RecordCouchdbHttpdBulkRequestsDataPoint adds a data point to couchdb.httpd.bulk_requests metric. -func (mb *MetricsBuilder) RecordCouchdbHttpdBulkRequestsDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordCouchdbHttpdBulkRequestsDataPoint(ts pcommon.Timestamp, val int64) { mb.metricCouchdbHttpdBulkRequests.recordDataPoint(mb.startTime, ts, val) } // RecordCouchdbHttpdRequestsDataPoint adds a data point to couchdb.httpd.requests metric. -func (mb *MetricsBuilder) RecordCouchdbHttpdRequestsDataPoint(ts pdata.Timestamp, val int64, httpMethodAttributeValue string) { +func (mb *MetricsBuilder) RecordCouchdbHttpdRequestsDataPoint(ts pcommon.Timestamp, val int64, httpMethodAttributeValue string) { mb.metricCouchdbHttpdRequests.recordDataPoint(mb.startTime, ts, val, httpMethodAttributeValue) } // RecordCouchdbHttpdResponsesDataPoint adds a data point to couchdb.httpd.responses metric. -func (mb *MetricsBuilder) RecordCouchdbHttpdResponsesDataPoint(ts pdata.Timestamp, val int64, httpStatusCodeAttributeValue string) { +func (mb *MetricsBuilder) RecordCouchdbHttpdResponsesDataPoint(ts pcommon.Timestamp, val int64, httpStatusCodeAttributeValue string) { mb.metricCouchdbHttpdResponses.recordDataPoint(mb.startTime, ts, val, httpStatusCodeAttributeValue) } // RecordCouchdbHttpdViewsDataPoint adds a data point to couchdb.httpd.views metric. -func (mb *MetricsBuilder) RecordCouchdbHttpdViewsDataPoint(ts pdata.Timestamp, val int64, viewAttributeValue string) { +func (mb *MetricsBuilder) RecordCouchdbHttpdViewsDataPoint(ts pcommon.Timestamp, val int64, viewAttributeValue string) { mb.metricCouchdbHttpdViews.recordDataPoint(mb.startTime, ts, val, viewAttributeValue) } // Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted, // and metrics builder should update its startTime and reset it's internal state accordingly. 
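From the scraper's side the regenerated builder is called exactly as before; only the timestamp parameter and the emitted type change. A hedged sketch, assuming it sits in the same internal/metadata package as the builder above; the counts, HTTP method, and node name are placeholders:

package metadata // assumption: lives next to the generated MetricsBuilder

import (
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"
)

// scrapeOnce records a couple of data points and emits them; the only visible
// difference after the migration is pcommon.Timestamp in, pmetric.Metrics out.
func scrapeOnce(mb *MetricsBuilder) pmetric.Metrics {
	now := pcommon.NewTimestampFromTime(time.Now())
	mb.RecordCouchdbHttpdRequestsDataPoint(now, 17, "GET")       // placeholder count and method
	mb.RecordCouchdbDatabaseOperationsDataPoint(now, 3, "reads") // placeholder count and operation
	return mb.Emit(WithCouchdbNodeName("couchdb@localhost"))     // placeholder node name
}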
func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { - mb.startTime = pdata.NewTimestampFromTime(time.Now()) + mb.startTime = pcommon.NewTimestampFromTime(time.Now()) for _, op := range options { op(mb) } diff --git a/receiver/couchdbreceiver/metrics.go b/receiver/couchdbreceiver/metrics.go index 403413d066d0..8c047c8e7fe2 100644 --- a/receiver/couchdbreceiver/metrics.go +++ b/receiver/couchdbreceiver/metrics.go @@ -17,13 +17,13 @@ package couchdbreceiver // import "github.com/open-telemetry/opentelemetry-colle import ( "fmt" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/receiver/scrapererror" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/couchdbreceiver/internal/metadata" ) -func (c *couchdbScraper) recordCouchdbAverageRequestTimeDataPoint(now pdata.Timestamp, stats map[string]interface{}, errors scrapererror.ScrapeErrors) { +func (c *couchdbScraper) recordCouchdbAverageRequestTimeDataPoint(now pcommon.Timestamp, stats map[string]interface{}, errors scrapererror.ScrapeErrors) { averageRequestTimeMetricKey := []string{"request_time", "value", "arithmetic_mean"} averageRequestTimeValue, err := getValueFromBody(averageRequestTimeMetricKey, stats) if err != nil { @@ -39,7 +39,7 @@ func (c *couchdbScraper) recordCouchdbAverageRequestTimeDataPoint(now pdata.Time c.mb.RecordCouchdbAverageRequestTimeDataPoint(now, parsedValue) } -func (c *couchdbScraper) recordCouchdbHttpdBulkRequestsDataPoint(now pdata.Timestamp, stats map[string]interface{}, errors scrapererror.ScrapeErrors) { +func (c *couchdbScraper) recordCouchdbHttpdBulkRequestsDataPoint(now pcommon.Timestamp, stats map[string]interface{}, errors scrapererror.ScrapeErrors) { httpdBulkRequestsMetricKey := []string{"httpd", "bulk_requests", "value"} httpdBulkRequestsMetricValue, err := getValueFromBody(httpdBulkRequestsMetricKey, stats) if err != nil { @@ -55,7 +55,7 @@ func (c *couchdbScraper) recordCouchdbHttpdBulkRequestsDataPoint(now pdata.Times c.mb.RecordCouchdbHttpdBulkRequestsDataPoint(now, parsedValue) } -func (c *couchdbScraper) recordCouchdbHttpdRequestsDataPoint(now pdata.Timestamp, stats map[string]interface{}, errors scrapererror.ScrapeErrors) { +func (c *couchdbScraper) recordCouchdbHttpdRequestsDataPoint(now pcommon.Timestamp, stats map[string]interface{}, errors scrapererror.ScrapeErrors) { methods := []string{metadata.AttributeHTTPMethod.COPY, metadata.AttributeHTTPMethod.DELETE, metadata.AttributeHTTPMethod.GET, metadata.AttributeHTTPMethod.HEAD, metadata.AttributeHTTPMethod.OPTIONS, metadata.AttributeHTTPMethod.POST, metadata.AttributeHTTPMethod.PUT} for _, method := range methods { httpdRequestMethodKey := []string{"httpd_request_methods", method, "value"} @@ -74,7 +74,7 @@ func (c *couchdbScraper) recordCouchdbHttpdRequestsDataPoint(now pdata.Timestamp } } -func (c *couchdbScraper) recordCouchdbHttpdResponsesDataPoint(now pdata.Timestamp, stats map[string]interface{}, errors scrapererror.ScrapeErrors) { +func (c *couchdbScraper) recordCouchdbHttpdResponsesDataPoint(now pcommon.Timestamp, stats map[string]interface{}, errors scrapererror.ScrapeErrors) { codes := []string{"200", "201", "202", "204", "206", "301", "302", "304", "400", "401", "403", "404", "405", "406", "409", "412", "413", "414", "415", "416", "417", "500", "501", "503"} for _, code := range codes { httpdResponsetCodeKey := []string{"httpd_status_codes", code, "value"} @@ -93,7 +93,7 @@ func (c *couchdbScraper) 
recordCouchdbHttpdResponsesDataPoint(now pdata.Timestam } } -func (c *couchdbScraper) recordCouchdbHttpdViewsDataPoint(now pdata.Timestamp, stats map[string]interface{}, errors scrapererror.ScrapeErrors) { +func (c *couchdbScraper) recordCouchdbHttpdViewsDataPoint(now pcommon.Timestamp, stats map[string]interface{}, errors scrapererror.ScrapeErrors) { views := []string{metadata.AttributeView.TemporaryViewReads, metadata.AttributeView.ViewReads} for _, view := range views { viewKey := []string{"httpd", view, "value"} @@ -112,7 +112,7 @@ func (c *couchdbScraper) recordCouchdbHttpdViewsDataPoint(now pdata.Timestamp, s } } -func (c *couchdbScraper) recordCouchdbDatabaseOpenDataPoint(now pdata.Timestamp, stats map[string]interface{}, errors scrapererror.ScrapeErrors) { +func (c *couchdbScraper) recordCouchdbDatabaseOpenDataPoint(now pcommon.Timestamp, stats map[string]interface{}, errors scrapererror.ScrapeErrors) { openDatabaseKey := []string{"open_databases", "value"} openDatabaseMetricValue, err := getValueFromBody(openDatabaseKey, stats) if err != nil { @@ -128,7 +128,7 @@ func (c *couchdbScraper) recordCouchdbDatabaseOpenDataPoint(now pdata.Timestamp, c.mb.RecordCouchdbDatabaseOpenDataPoint(now, parsedValue) } -func (c *couchdbScraper) recordCouchdbFileDescriptorOpenDataPoint(now pdata.Timestamp, stats map[string]interface{}, errors scrapererror.ScrapeErrors) { +func (c *couchdbScraper) recordCouchdbFileDescriptorOpenDataPoint(now pcommon.Timestamp, stats map[string]interface{}, errors scrapererror.ScrapeErrors) { fileDescriptorKey := []string{"open_os_files", "value"} fileDescriptorMetricValue, err := getValueFromBody(fileDescriptorKey, stats) if err != nil { @@ -144,7 +144,7 @@ func (c *couchdbScraper) recordCouchdbFileDescriptorOpenDataPoint(now pdata.Time c.mb.RecordCouchdbFileDescriptorOpenDataPoint(now, parsedValue) } -func (c *couchdbScraper) recordCouchdbDatabaseOperationsDataPoint(now pdata.Timestamp, stats map[string]interface{}, errors scrapererror.ScrapeErrors) { +func (c *couchdbScraper) recordCouchdbDatabaseOperationsDataPoint(now pcommon.Timestamp, stats map[string]interface{}, errors scrapererror.ScrapeErrors) { operations := []string{metadata.AttributeOperation.Reads, metadata.AttributeOperation.Writes} keyPaths := [][]string{{"database_reads", "value"}, {"database_writes", "value"}} for i := 0; i < len(operations); i++ { diff --git a/receiver/couchdbreceiver/scraper.go b/receiver/couchdbreceiver/scraper.go index b19b20e41c6f..952d1463b509 100644 --- a/receiver/couchdbreceiver/scraper.go +++ b/receiver/couchdbreceiver/scraper.go @@ -21,7 +21,8 @@ import ( "time" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/receiver/scrapererror" "go.uber.org/zap" @@ -52,9 +53,9 @@ func (c *couchdbScraper) start(_ context.Context, host component.Host) error { return nil } -func (c *couchdbScraper) scrape(context.Context) (pdata.Metrics, error) { +func (c *couchdbScraper) scrape(context.Context) (pmetric.Metrics, error) { if c.client == nil { - return pdata.NewMetrics(), errors.New("no client available") + return pmetric.NewMetrics(), errors.New("no client available") } localNode := "_local" @@ -64,10 +65,10 @@ func (c *couchdbScraper) scrape(context.Context) (pdata.Metrics, error) { zap.String("endpoint", c.config.Endpoint), zap.Error(err), ) - return pdata.NewMetrics(), err + return pmetric.NewMetrics(), err } - now := 
pdata.NewTimestampFromTime(time.Now()) + now := pcommon.NewTimestampFromTime(time.Now()) var errors scrapererror.ScrapeErrors c.recordCouchdbAverageRequestTimeDataPoint(now, stats, errors) diff --git a/receiver/dockerstatsreceiver/go.mod b/receiver/dockerstatsreceiver/go.mod index cd1469abd9ed..cede0c44a2ea 100644 --- a/receiver/dockerstatsreceiver/go.mod +++ b/receiver/dockerstatsreceiver/go.mod @@ -7,14 +7,13 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/containertest v0.48.0 github.com/open-telemetry/opentelemetry-collector-contrib/internal/docker v0.48.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 + go.uber.org/multierr v1.8.0 go.uber.org/zap v1.21.0 - ) -require go.uber.org/multierr v1.8.0 - require ( github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect github.com/Microsoft/go-winio v0.5.1 // indirect @@ -29,7 +28,7 @@ require ( github.com/gobwas/glob v0.2.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.2 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/pretty v0.3.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect @@ -40,13 +39,12 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/sirupsen/logrus v1.8.1 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect + golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 // indirect google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect google.golang.org/grpc v1.45.0 // indirect google.golang.org/protobuf v1.28.0 // indirect @@ -61,3 +59,5 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/docke // see https://github.com/distribution/distribution/issues/3590 exclude github.com/docker/distribution v2.8.0+incompatible + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/receiver/dockerstatsreceiver/go.sum b/receiver/dockerstatsreceiver/go.sum index bf02b12da5d8..dd2b74718860 100644 --- a/receiver/dockerstatsreceiver/go.sum +++ b/receiver/dockerstatsreceiver/go.sum @@ -458,8 +458,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod 
h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -641,8 +641,6 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasO github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= @@ -709,17 +707,19 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 
h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= @@ -817,7 +817,7 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -901,8 +901,8 @@ golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/receiver/dockerstatsreceiver/metrics.go b/receiver/dockerstatsreceiver/metrics.go index 3cd3f4f7ea24..44e45a68c968 100644 --- a/receiver/dockerstatsreceiver/metrics.go +++ b/receiver/dockerstatsreceiver/metrics.go @@ -21,8 +21,9 @@ import ( "strings" dtypes "github.com/docker/docker/api/types" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/docker" ) @@ -32,12 +33,12 @@ const ( ) func ContainerStatsToMetrics( - now pdata.Timestamp, + now pcommon.Timestamp, containerStats *dtypes.StatsJSON, container docker.Container, config *Config, -) pdata.Metrics { - md := pdata.NewMetrics() +) pmetric.Metrics { + md := pmetric.NewMetrics() rs := md.ResourceMetrics().AppendEmpty() rs.SetSchemaUrl(conventions.SchemaURL) resourceAttr := rs.Resource().Attributes() @@ -57,7 +58,7 @@ func 
ContainerStatsToMetrics( return md } -func updateConfiguredResourceAttributes(resourceAttr pdata.Map, container docker.Container, config *Config) { +func updateConfiguredResourceAttributes(resourceAttr pcommon.Map, container docker.Container, config *Config) { for k, label := range config.EnvVarsToMetricLabels { if v := container.EnvMap[k]; v != "" { resourceAttr.UpsertString(label, v) @@ -78,7 +79,7 @@ type blkioStat struct { } // metrics for https://www.kernel.org/doc/Documentation/cgroup-v1/blkio-controller.txt -func appendBlockioMetrics(dest pdata.MetricSlice, blkioStats *dtypes.BlkioStats, ts pdata.Timestamp) { +func appendBlockioMetrics(dest pmetric.MetricSlice, blkioStats *dtypes.BlkioStats, ts pcommon.Timestamp) { for _, blkiostat := range []blkioStat{ {"io_merged_recursive", "1", blkioStats.IoMergedRecursive}, {"io_queued_recursive", "1", blkioStats.IoQueuedRecursive}, @@ -103,7 +104,7 @@ func appendBlockioMetrics(dest pdata.MetricSlice, blkioStats *dtypes.BlkioStats, } } -func appendCPUMetrics(dest pdata.MetricSlice, cpuStats *dtypes.CPUStats, previousCPUStats *dtypes.CPUStats, ts pdata.Timestamp, providePerCoreMetrics bool) { +func appendCPUMetrics(dest pmetric.MetricSlice, cpuStats *dtypes.CPUStats, previousCPUStats *dtypes.CPUStats, ts pcommon.Timestamp, providePerCoreMetrics bool) { populateCumulative(dest.AppendEmpty(), "cpu.usage.system", "ns", int64(cpuStats.SystemUsage), ts, nil, nil) populateCumulative(dest.AppendEmpty(), "cpu.usage.total", "ns", int64(cpuStats.CPUUsage.TotalUsage), ts, nil, nil) @@ -171,7 +172,7 @@ var memoryStatsThatAreCumulative = map[string]bool{ "total_pgpgout": true, } -func appendMemoryMetrics(dest pdata.MetricSlice, memoryStats *dtypes.MemoryStats, ts pdata.Timestamp) { +func appendMemoryMetrics(dest pmetric.MetricSlice, memoryStats *dtypes.MemoryStats, ts pcommon.Timestamp) { totalUsage := int64(memoryStats.Usage - memoryStats.Stats["total_cache"]) populateGauge(dest.AppendEmpty(), "memory.usage.limit", int64(memoryStats.Limit), ts) populateGauge(dest.AppendEmpty(), "memory.usage.total", totalUsage, ts) @@ -204,7 +205,7 @@ func appendMemoryMetrics(dest pdata.MetricSlice, memoryStats *dtypes.MemoryStats } } -func appendNetworkMetrics(dest pdata.MetricSlice, networks *map[string]dtypes.NetworkStats, ts pdata.Timestamp) { +func appendNetworkMetrics(dest pmetric.MetricSlice, networks *map[string]dtypes.NetworkStats, ts pcommon.Timestamp) { if networks == nil || *networks == nil { return } @@ -225,22 +226,22 @@ func appendNetworkMetrics(dest pdata.MetricSlice, networks *map[string]dtypes.Ne } } -func populateCumulative(dest pdata.Metric, name string, unit string, val int64, ts pdata.Timestamp, labelKeys []string, labelValues []string) { - populateMetricMetadata(dest, name, unit, pdata.MetricDataTypeSum) +func populateCumulative(dest pmetric.Metric, name string, unit string, val int64, ts pcommon.Timestamp, labelKeys []string, labelValues []string) { + populateMetricMetadata(dest, name, unit, pmetric.MetricDataTypeSum) sum := dest.Sum() sum.SetIsMonotonic(true) - sum.SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + sum.SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) dp := sum.DataPoints().AppendEmpty() dp.SetIntVal(val) dp.SetTimestamp(ts) populateAttributes(dp.Attributes(), labelKeys, labelValues) } -func populateCumulativeMultiPoints(dest pdata.Metric, name string, unit string, vals []int64, ts pdata.Timestamp, labelKeys []string, labelValues [][]string) { - populateMetricMetadata(dest, name, unit, 
pdata.MetricDataTypeSum) +func populateCumulativeMultiPoints(dest pmetric.Metric, name string, unit string, vals []int64, ts pcommon.Timestamp, labelKeys []string, labelValues [][]string) { + populateMetricMetadata(dest, name, unit, pmetric.MetricDataTypeSum) sum := dest.Sum() sum.SetIsMonotonic(true) - sum.SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + sum.SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) dps := sum.DataPoints() dps.EnsureCapacity(len(vals)) for i := range vals { @@ -251,9 +252,9 @@ func populateCumulativeMultiPoints(dest pdata.Metric, name string, unit string, } } -func populateGauge(dest pdata.Metric, name string, val int64, ts pdata.Timestamp) { +func populateGauge(dest pmetric.Metric, name string, val int64, ts pcommon.Timestamp) { // Unit, labelKeys, labelValues always constants, when that changes add them as argument to the func. - populateMetricMetadata(dest, name, "By", pdata.MetricDataTypeGauge) + populateMetricMetadata(dest, name, "By", pmetric.MetricDataTypeGauge) sum := dest.Gauge() dp := sum.DataPoints().AppendEmpty() dp.SetIntVal(val) @@ -261,8 +262,8 @@ func populateGauge(dest pdata.Metric, name string, val int64, ts pdata.Timestamp populateAttributes(dp.Attributes(), nil, nil) } -func populateGaugeF(dest pdata.Metric, name string, unit string, val float64, ts pdata.Timestamp, labelKeys []string, labelValues []string) { - populateMetricMetadata(dest, name, unit, pdata.MetricDataTypeGauge) +func populateGaugeF(dest pmetric.Metric, name string, unit string, val float64, ts pcommon.Timestamp, labelKeys []string, labelValues []string) { + populateMetricMetadata(dest, name, unit, pmetric.MetricDataTypeGauge) sum := dest.Gauge() dp := sum.DataPoints().AppendEmpty() dp.SetDoubleVal(val) @@ -270,13 +271,13 @@ func populateGaugeF(dest pdata.Metric, name string, unit string, val float64, ts populateAttributes(dp.Attributes(), labelKeys, labelValues) } -func populateMetricMetadata(dest pdata.Metric, name string, unit string, ty pdata.MetricDataType) { +func populateMetricMetadata(dest pmetric.Metric, name string, unit string, ty pmetric.MetricDataType) { dest.SetName(metricPrefix + name) dest.SetUnit(unit) dest.SetDataType(ty) } -func populateAttributes(dest pdata.Map, labelKeys []string, labelValues []string) { +func populateAttributes(dest pcommon.Map, labelKeys []string, labelValues []string) { for i := range labelKeys { dest.UpsertString(labelKeys[i], labelValues[i]) } diff --git a/receiver/dockerstatsreceiver/metrics_test.go b/receiver/dockerstatsreceiver/metrics_test.go index b7a70fda8940..bbac1368513e 100644 --- a/receiver/dockerstatsreceiver/metrics_test.go +++ b/receiver/dockerstatsreceiver/metrics_test.go @@ -23,8 +23,9 @@ import ( dtypes "github.com/docker/docker/api/types" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/docker" ) @@ -52,12 +53,12 @@ type Value struct { } func metricsData( - ts pdata.Timestamp, + ts pcommon.Timestamp, resourceLabels map[string]string, metrics ...Metric, -) pdata.Metrics { +) pmetric.Metrics { rLabels := mergeMaps(defaultLabels(), resourceLabels) - md := pdata.NewMetrics() + md := pmetric.NewMetrics() rs := md.ResourceMetrics().AppendEmpty() rs.SetSchemaUrl(conventions.SchemaURL) rsAttr := 
rs.Resource().Attributes() @@ -73,15 +74,15 @@ func metricsData( mdMetric.SetName(m.name) mdMetric.SetUnit(m.unit) - var dps pdata.NumberDataPointSlice + var dps pmetric.NumberDataPointSlice switch m.mtype { case MetricTypeCumulative: - mdMetric.SetDataType(pdata.MetricDataTypeSum) + mdMetric.SetDataType(pmetric.MetricDataTypeSum) mdMetric.Sum().SetIsMonotonic(true) - mdMetric.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + mdMetric.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) dps = mdMetric.Sum().DataPoints() case MetricTypeGauge, MetricTypeDoubleGauge: - mdMetric.SetDataType(pdata.MetricDataTypeGauge) + mdMetric.SetDataType(pmetric.MetricDataTypeGauge) dps = mdMetric.Gauge().DataPoints() } @@ -191,10 +192,10 @@ func mergeMaps(maps ...map[string]string) map[string]string { func assertMetricsDataEqual( t *testing.T, - now pdata.Timestamp, + now pcommon.Timestamp, expected []Metric, labels map[string]string, - actual pdata.Metrics, + actual pmetric.Metrics, ) { actual.ResourceMetrics().At(0).Resource().Attributes().Sort() assert.Equal(t, metricsData(now, labels, expected...), actual) @@ -212,7 +213,7 @@ func TestZeroValueStats(t *testing.T) { containers := containerJSON(t) config := &Config{} - now := pdata.NewTimestampFromTime(time.Now()) + now := pcommon.NewTimestampFromTime(time.Now()) md := ContainerStatsToMetrics(now, stats, containers, config) metrics := []Metric{ @@ -268,7 +269,7 @@ func TestStatsToDefaultMetrics(t *testing.T) { containers := containerJSON(t) config := &Config{} - now := pdata.NewTimestampFromTime(time.Now()) + now := pcommon.NewTimestampFromTime(time.Now()) md := ContainerStatsToMetrics(now, stats, containers, config) assertMetricsDataEqual(t, now, defaultMetrics(), nil, md) @@ -281,7 +282,7 @@ func TestStatsToAllMetrics(t *testing.T) { ProvidePerCoreCPUMetrics: true, } - now := pdata.NewTimestampFromTime(time.Now()) + now := pcommon.NewTimestampFromTime(time.Now()) md := ContainerStatsToMetrics(now, stats, containers, config) metrics := []Metric{ @@ -380,7 +381,7 @@ func TestEnvVarToMetricLabels(t *testing.T) { }, } - now := pdata.NewTimestampFromTime(time.Now()) + now := pcommon.NewTimestampFromTime(time.Now()) md := ContainerStatsToMetrics(now, stats, containers, config) expectedLabels := map[string]string{ @@ -401,7 +402,7 @@ func TestContainerLabelToMetricLabels(t *testing.T) { }, } - now := pdata.NewTimestampFromTime(time.Now()) + now := pcommon.NewTimestampFromTime(time.Now()) md := ContainerStatsToMetrics(now, stats, containers, config) expectedLabels := map[string]string{ diff --git a/receiver/dockerstatsreceiver/receiver.go b/receiver/dockerstatsreceiver/receiver.go index 842110e51b5a..954238c15e9e 100644 --- a/receiver/dockerstatsreceiver/receiver.go +++ b/receiver/dockerstatsreceiver/receiver.go @@ -21,7 +21,8 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/receiver/scrapererror" "go.opentelemetry.io/collector/receiver/scraperhelper" "go.uber.org/multierr" @@ -78,11 +79,11 @@ func (r *receiver) start(ctx context.Context, _ component.Host) error { } type result struct { - md pdata.Metrics + md pmetric.Metrics err error } -func (r *receiver) scrape(ctx context.Context) (pdata.Metrics, error) { +func (r *receiver) scrape(ctx context.Context) (pmetric.Metrics, error) { 
containers := r.client.Containers() results := make(chan result, len(containers)) @@ -93,12 +94,12 @@ func (r *receiver) scrape(ctx context.Context) (pdata.Metrics, error) { defer wg.Done() statsJSON, err := r.client.FetchContainerStatsAsJSON(ctx, c) if err != nil { - results <- result{md: pdata.Metrics{}, err: err} + results <- result{md: pmetric.Metrics{}, err: err} return } results <- result{ - md: ContainerStatsToMetrics(pdata.NewTimestampFromTime(time.Now()), statsJSON, c, r.config), + md: ContainerStatsToMetrics(pcommon.NewTimestampFromTime(time.Now()), statsJSON, c, r.config), err: nil} }(container) } @@ -107,7 +108,7 @@ func (r *receiver) scrape(ctx context.Context) (pdata.Metrics, error) { close(results) var errs error - md := pdata.NewMetrics() + md := pmetric.NewMetrics() for res := range results { if res.err != nil { // Don't know the number of failed metrics, but one container fetch is a partial error. diff --git a/receiver/dotnetdiagnosticsreceiver/go.mod b/receiver/dotnetdiagnosticsreceiver/go.mod index ec5d6312dc7c..f9b11b799a11 100644 --- a/receiver/dotnetdiagnosticsreceiver/go.mod +++ b/receiver/dotnetdiagnosticsreceiver/go.mod @@ -4,8 +4,8 @@ go 1.17 require ( github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/zap v1.21.0 ) @@ -13,7 +13,7 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/fsnotify/fsnotify v1.5.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/pretty v0.3.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect @@ -21,7 +21,6 @@ require ( github.com/pelletier/go-toml v1.9.4 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect @@ -32,3 +31,5 @@ require ( gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/receiver/dotnetdiagnosticsreceiver/go.sum b/receiver/dotnetdiagnosticsreceiver/go.sum index bf571f524adf..53a53c0a3011 100644 --- a/receiver/dotnetdiagnosticsreceiver/go.sum +++ b/receiver/dotnetdiagnosticsreceiver/go.sum @@ -15,7 +15,7 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= 
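Each per-receiver go.mod in this change follows the same recipe: the not-yet-tagged pdata module is required with the all-zero placeholder version and then pinned to the 2022-04-12 pseudo-version with a replace directive. The stanza, copied from the go.mod hunks in this diff, reads:

require go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000

replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d

The matching go.sum entries for v0.0.0-20220412005140-8eb68f40028d are what the h1: hash changes in the surrounding hunks record.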
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= @@ -70,7 +70,6 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -100,8 +99,8 @@ github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqx github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= @@ -148,8 +147,6 @@ github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTE github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -164,17 +161,17 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model 
v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -208,7 +205,7 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -231,7 +228,7 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= diff --git a/receiver/dotnetdiagnosticsreceiver/metrics/converter.go 
b/receiver/dotnetdiagnosticsreceiver/metrics/converter.go index b0112ecbf891..8fb249945eda 100644 --- a/receiver/dotnetdiagnosticsreceiver/metrics/converter.go +++ b/receiver/dotnetdiagnosticsreceiver/metrics/converter.go @@ -17,13 +17,14 @@ package metrics // import "github.com/open-telemetry/opentelemetry-collector-con import ( "time" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/dotnetdiagnosticsreceiver/dotnet" ) -func rawMetricsToPdata(rawMetrics []dotnet.Metric, startTime, now time.Time) pdata.Metrics { - pdm := pdata.NewMetrics() +func rawMetricsToPdata(rawMetrics []dotnet.Metric, startTime, now time.Time) pmetric.Metrics { + pdm := pmetric.NewMetrics() rms := pdm.ResourceMetrics() rm := rms.AppendEmpty() rm.Resource().Attributes() @@ -37,26 +38,26 @@ func rawMetricsToPdata(rawMetrics []dotnet.Metric, startTime, now time.Time) pda return pdm } -func rawMetricToPdata(dm dotnet.Metric, pdm pdata.Metric, startTime, now time.Time) pdata.Metric { +func rawMetricToPdata(dm dotnet.Metric, pdm pmetric.Metric, startTime, now time.Time) pmetric.Metric { const metricNamePrefix = "dotnet." pdm.SetName(metricNamePrefix + dm.Name()) pdm.SetDescription(dm.DisplayName()) pdm.SetUnit(mapUnits(dm.DisplayUnits())) - nowPD := pdata.NewTimestampFromTime(now) + nowPD := pcommon.NewTimestampFromTime(now) switch dm.CounterType() { case "Mean": - pdm.SetDataType(pdata.MetricDataTypeGauge) + pdm.SetDataType(pmetric.MetricDataTypeGauge) dps := pdm.Gauge().DataPoints() dp := dps.AppendEmpty() dp.SetTimestamp(nowPD) dp.SetDoubleVal(dm.Mean()) case "Sum": - pdm.SetDataType(pdata.MetricDataTypeSum) + pdm.SetDataType(pmetric.MetricDataTypeSum) sum := pdm.Sum() - sum.SetAggregationTemporality(pdata.MetricAggregationTemporalityDelta) + sum.SetAggregationTemporality(pmetric.MetricAggregationTemporalityDelta) dps := sum.DataPoints() dp := dps.AppendEmpty() - dp.SetStartTimestamp(pdata.NewTimestampFromTime(startTime)) + dp.SetStartTimestamp(pcommon.NewTimestampFromTime(startTime)) dp.SetTimestamp(nowPD) dp.SetDoubleVal(dm.Increment()) } diff --git a/receiver/dotnetdiagnosticsreceiver/metrics/converter_test.go b/receiver/dotnetdiagnosticsreceiver/metrics/converter_test.go index 0eafba6cc6fe..743c977cfa6d 100644 --- a/receiver/dotnetdiagnosticsreceiver/metrics/converter_test.go +++ b/receiver/dotnetdiagnosticsreceiver/metrics/converter_test.go @@ -22,7 +22,8 @@ import ( "time" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/dotnetdiagnosticsreceiver/dotnet" ) @@ -36,7 +37,7 @@ func TestMeanMetricToPdata(t *testing.T) { assert.Equal(t, 1, pts.Len()) pt := pts.At(0) assert.EqualValues(t, 0, pt.StartTimestamp()) - assert.Equal(t, pdata.NewTimestampFromTime(time.Unix(111, 0)), pt.Timestamp()) + assert.Equal(t, pcommon.NewTimestampFromTime(time.Unix(111, 0)), pt.Timestamp()) assert.Equal(t, 0.5, pt.DoubleVal()) } @@ -50,12 +51,12 @@ func TestSumMetricToPdata(t *testing.T) { pts := sum.DataPoints() assert.Equal(t, 1, pts.Len()) pt := pts.At(0) - assert.Equal(t, pdata.NewTimestampFromTime(time.Unix(42, 0)), pt.StartTimestamp()) - assert.Equal(t, pdata.NewTimestampFromTime(time.Unix(111, 0)), pt.Timestamp()) + assert.Equal(t, 
pcommon.NewTimestampFromTime(time.Unix(42, 0)), pt.StartTimestamp()) + assert.Equal(t, pcommon.NewTimestampFromTime(time.Unix(111, 0)), pt.Timestamp()) assert.Equal(t, 262672.0, pt.DoubleVal()) } -func testMetricConversion(t *testing.T, metricFile int, expectedName string, expectedUnits string) pdata.Metric { +func testMetricConversion(t *testing.T, metricFile int, expectedName string, expectedUnits string) pmetric.Metric { rm := readTestdataMetric(metricFile) pdms := rawMetricsToPdata([]dotnet.Metric{rm}, time.Unix(42, 0), time.Unix(111, 0)) rms := pdms.ResourceMetrics() diff --git a/receiver/dotnetdiagnosticsreceiver/metrics/sender.go b/receiver/dotnetdiagnosticsreceiver/metrics/sender.go index 82daedd40a91..2cc3b073a0a8 100644 --- a/receiver/dotnetdiagnosticsreceiver/metrics/sender.go +++ b/receiver/dotnetdiagnosticsreceiver/metrics/sender.go @@ -36,7 +36,7 @@ func NewSender(next consumer.Metrics, logger *zap.Logger) *Sender { return &Sender{next: next, logger: logger} } -// Send accepts a slice of dotnet.Metrics, converts them to pdata.Metrics, and +// Send accepts a slice of dotnet.Metrics, converts them to pmetric.Metrics, and // sends them to the next pdata consumer. Conforms to dotnet.MetricsConsumer. func (s *Sender) Send(rawMetrics []dotnet.Metric) { now := time.Now() diff --git a/receiver/elasticsearchreceiver/go.mod b/receiver/elasticsearchreceiver/go.mod index 62512653c0dd..34c728a34db6 100644 --- a/receiver/elasticsearchreceiver/go.mod +++ b/receiver/elasticsearchreceiver/go.mod @@ -4,8 +4,7 @@ go 1.17 require ( github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d go.uber.org/multierr v1.8.0 go.uber.org/zap v1.21.0 ) @@ -20,7 +19,7 @@ require ( github.com/golang/protobuf v1.5.2 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/klauspost/compress v1.15.1 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/text v0.2.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect @@ -30,7 +29,6 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rogpeppe/go-internal v1.6.1 // indirect github.com/rs/cors v1.8.2 // indirect - github.com/spf13/cast v1.4.1 // indirect github.com/stretchr/objx v0.1.1 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 // indirect @@ -38,9 +36,7 @@ require ( go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect golang.org/x/text v0.3.7 // indirect - google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect google.golang.org/grpc v1.45.0 // indirect google.golang.org/protobuf v1.28.0 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect @@ -50,7 +46,10 @@ require ( require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/scrapertest v0.48.0 - golang.org/x/net v0.0.0-20211108170745-6635138e15ea // indirect + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect ) replace 
github.com/open-telemetry/opentelemetry-collector-contrib/internal/scrapertest => ../../internal/scrapertest + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/receiver/elasticsearchreceiver/go.sum b/receiver/elasticsearchreceiver/go.sum index 60e51957fc85..4eccc37f77db 100644 --- a/receiver/elasticsearchreceiver/go.sum +++ b/receiver/elasticsearchreceiver/go.sum @@ -1,7 +1,6 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= @@ -18,15 +17,13 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= @@ -38,7 +35,6 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= 
-github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= @@ -92,7 +88,6 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -124,8 +119,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= @@ -173,9 +168,6 @@ github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -193,10 +185,10 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 
h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0/go.mod h1:PFmBsWbldL1kiWZk9+0LBZz2brhByaGsvp6pRICMlPE= @@ -207,7 +199,7 @@ go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1 go.opentelemetry.io/otel/metric v0.28.0/go.mod h1:TrzsfQAmQaB1PDcdhBauLMk7nyyg9hm+GoQq/ekE9Iw= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.0/go.mod h1:qs7BrU5cZ8dXQHBGxHMOxwME/27YH2qEp4/+tZLLwJE= go.opentelemetry.io/otel/trace v1.6.1/go.mod h1:RkFRM1m0puWIq10oxImnGEduNBzxiN7TXluRBtE+5j0= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= @@ -246,8 +238,8 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20211108170745-6635138e15ea h1:FosBMXtOc8Tp9Hbo4ltl1WJSrTVewZU8MPnTPY2HdH8= -golang.org/x/net v0.0.0-20211108170745-6635138e15ea/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -272,13 +264,11 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -304,7 +294,6 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= @@ -314,7 +303,6 @@ google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8 google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -328,7 +316,6 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod 
h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= diff --git a/receiver/elasticsearchreceiver/internal/metadata/emitters.go b/receiver/elasticsearchreceiver/internal/metadata/emitters.go index 03c5774f3ed9..213b9f31c7f2 100644 --- a/receiver/elasticsearchreceiver/internal/metadata/emitters.go +++ b/receiver/elasticsearchreceiver/internal/metadata/emitters.go @@ -14,9 +14,9 @@ package metadata // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/elasticsearchreceiver/internal/metadata" -import "go.opentelemetry.io/collector/model/pdata" +import "go.opentelemetry.io/collector/pdata/pmetric" -func (mb *MetricsBuilder) EmitNodeMetrics(metrics pdata.MetricSlice) { +func (mb *MetricsBuilder) EmitNodeMetrics(metrics pmetric.MetricSlice) { mb.metricElasticsearchNodeCacheEvictions.emit(metrics) mb.metricElasticsearchNodeCacheMemoryUsage.emit(metrics) mb.metricElasticsearchNodeClusterConnections.emit(metrics) @@ -44,7 +44,7 @@ func (mb *MetricsBuilder) EmitNodeMetrics(metrics pdata.MetricSlice) { mb.metricJvmThreadsCount.emit(metrics) } -func (mb *MetricsBuilder) EmitClusterMetrics(metrics pdata.MetricSlice) { +func (mb *MetricsBuilder) EmitClusterMetrics(metrics pmetric.MetricSlice) { mb.metricElasticsearchClusterDataNodes.emit(metrics) mb.metricElasticsearchClusterNodes.emit(metrics) mb.metricElasticsearchClusterShards.emit(metrics) diff --git a/receiver/elasticsearchreceiver/internal/metadata/generated_metrics_v2.go b/receiver/elasticsearchreceiver/internal/metadata/generated_metrics_v2.go index 0b2320ca8223..349e6451194f 100644 --- a/receiver/elasticsearchreceiver/internal/metadata/generated_metrics_v2.go +++ b/receiver/elasticsearchreceiver/internal/metadata/generated_metrics_v2.go @@ -5,7 +5,8 @@ package metadata import ( "time" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" ) // MetricSettings provides common settings for a particular metric. @@ -139,7 +140,7 @@ func DefaultMetricsSettings() MetricsSettings { } type metricElasticsearchClusterDataNodes struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -149,12 +150,12 @@ func (m *metricElasticsearchClusterDataNodes) init() { m.data.SetName("elasticsearch.cluster.data_nodes") m.data.SetDescription("The number of data nodes in the cluster.") m.data.SetUnit("{nodes}") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) } -func (m *metricElasticsearchClusterDataNodes) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricElasticsearchClusterDataNodes) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -172,7 +173,7 @@ func (m *metricElasticsearchClusterDataNodes) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricElasticsearchClusterDataNodes) emit(metrics pdata.MetricSlice) { +func (m *metricElasticsearchClusterDataNodes) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -183,14 +184,14 @@ func (m *metricElasticsearchClusterDataNodes) emit(metrics pdata.MetricSlice) { func newMetricElasticsearchClusterDataNodes(settings MetricSettings) metricElasticsearchClusterDataNodes { m := metricElasticsearchClusterDataNodes{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchClusterHealth struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -200,13 +201,13 @@ func (m *metricElasticsearchClusterHealth) init() { m.data.SetName("elasticsearch.cluster.health") m.data.SetDescription("The health status of the cluster.") m.data.SetUnit("{status}") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricElasticsearchClusterHealth) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, healthStatusAttributeValue string) { +func (m *metricElasticsearchClusterHealth) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, healthStatusAttributeValue string) { if !m.settings.Enabled { return } @@ -214,7 +215,7 @@ func (m *metricElasticsearchClusterHealth) recordDataPoint(start pdata.Timestamp dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.HealthStatus, pdata.NewValueString(healthStatusAttributeValue)) + dp.Attributes().Insert(A.HealthStatus, pcommon.NewValueString(healthStatusAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -225,7 +226,7 @@ func (m *metricElasticsearchClusterHealth) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricElasticsearchClusterHealth) emit(metrics pdata.MetricSlice) { +func (m *metricElasticsearchClusterHealth) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -236,14 +237,14 @@ func (m *metricElasticsearchClusterHealth) emit(metrics pdata.MetricSlice) { func newMetricElasticsearchClusterHealth(settings MetricSettings) metricElasticsearchClusterHealth { m := metricElasticsearchClusterHealth{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchClusterNodes struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -253,12 +254,12 @@ func (m *metricElasticsearchClusterNodes) init() { m.data.SetName("elasticsearch.cluster.nodes") m.data.SetDescription("The total number of nodes in the cluster.") m.data.SetUnit("{nodes}") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) } -func (m *metricElasticsearchClusterNodes) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricElasticsearchClusterNodes) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -276,7 +277,7 @@ func (m *metricElasticsearchClusterNodes) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricElasticsearchClusterNodes) emit(metrics pdata.MetricSlice) { +func (m *metricElasticsearchClusterNodes) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -287,14 +288,14 @@ func (m *metricElasticsearchClusterNodes) emit(metrics pdata.MetricSlice) { func newMetricElasticsearchClusterNodes(settings MetricSettings) metricElasticsearchClusterNodes { m := metricElasticsearchClusterNodes{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchClusterShards struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -304,13 +305,13 @@ func (m *metricElasticsearchClusterShards) init() { m.data.SetName("elasticsearch.cluster.shards") m.data.SetDescription("The number of shards in the cluster.") m.data.SetUnit("{shards}") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricElasticsearchClusterShards) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, shardStateAttributeValue string) { +func (m *metricElasticsearchClusterShards) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, shardStateAttributeValue string) { if !m.settings.Enabled { return } @@ -318,7 +319,7 @@ func (m *metricElasticsearchClusterShards) recordDataPoint(start pdata.Timestamp dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.ShardState, pdata.NewValueString(shardStateAttributeValue)) + dp.Attributes().Insert(A.ShardState, pcommon.NewValueString(shardStateAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -329,7 +330,7 @@ func (m *metricElasticsearchClusterShards) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricElasticsearchClusterShards) emit(metrics pdata.MetricSlice) { +func (m *metricElasticsearchClusterShards) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -340,14 +341,14 @@ func (m *metricElasticsearchClusterShards) emit(metrics pdata.MetricSlice) { func newMetricElasticsearchClusterShards(settings MetricSettings) metricElasticsearchClusterShards { m := metricElasticsearchClusterShards{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchNodeCacheEvictions struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -357,13 +358,13 @@ func (m *metricElasticsearchNodeCacheEvictions) init() { m.data.SetName("elasticsearch.node.cache.evictions") m.data.SetDescription("The number of evictions from the cache.") m.data.SetUnit("{evictions}") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricElasticsearchNodeCacheEvictions) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, cacheNameAttributeValue string) { +func (m *metricElasticsearchNodeCacheEvictions) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, cacheNameAttributeValue string) { if !m.settings.Enabled { return } @@ -371,7 +372,7 @@ func (m *metricElasticsearchNodeCacheEvictions) recordDataPoint(start pdata.Time dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.CacheName, pdata.NewValueString(cacheNameAttributeValue)) + dp.Attributes().Insert(A.CacheName, pcommon.NewValueString(cacheNameAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -382,7 +383,7 @@ func (m *metricElasticsearchNodeCacheEvictions) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricElasticsearchNodeCacheEvictions) emit(metrics pdata.MetricSlice) { +func (m *metricElasticsearchNodeCacheEvictions) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -393,14 +394,14 @@ func (m *metricElasticsearchNodeCacheEvictions) emit(metrics pdata.MetricSlice) func newMetricElasticsearchNodeCacheEvictions(settings MetricSettings) metricElasticsearchNodeCacheEvictions { m := metricElasticsearchNodeCacheEvictions{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchNodeCacheMemoryUsage struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -410,13 +411,13 @@ func (m *metricElasticsearchNodeCacheMemoryUsage) init() { m.data.SetName("elasticsearch.node.cache.memory.usage") m.data.SetDescription("The size in bytes of the cache.") m.data.SetUnit("By") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricElasticsearchNodeCacheMemoryUsage) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, cacheNameAttributeValue string) { +func (m *metricElasticsearchNodeCacheMemoryUsage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, cacheNameAttributeValue string) { if !m.settings.Enabled { return } @@ -424,7 +425,7 @@ func (m *metricElasticsearchNodeCacheMemoryUsage) recordDataPoint(start pdata.Ti dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.CacheName, pdata.NewValueString(cacheNameAttributeValue)) + dp.Attributes().Insert(A.CacheName, pcommon.NewValueString(cacheNameAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -435,7 +436,7 @@ func (m *metricElasticsearchNodeCacheMemoryUsage) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricElasticsearchNodeCacheMemoryUsage) emit(metrics pdata.MetricSlice) { +func (m *metricElasticsearchNodeCacheMemoryUsage) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -446,14 +447,14 @@ func (m *metricElasticsearchNodeCacheMemoryUsage) emit(metrics pdata.MetricSlice func newMetricElasticsearchNodeCacheMemoryUsage(settings MetricSettings) metricElasticsearchNodeCacheMemoryUsage { m := metricElasticsearchNodeCacheMemoryUsage{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchNodeClusterConnections struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -463,12 +464,12 @@ func (m *metricElasticsearchNodeClusterConnections) init() { m.data.SetName("elasticsearch.node.cluster.connections") m.data.SetDescription("The number of open tcp connections for internal cluster communication.") m.data.SetUnit("{connections}") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) } -func (m *metricElasticsearchNodeClusterConnections) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricElasticsearchNodeClusterConnections) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -486,7 +487,7 @@ func (m *metricElasticsearchNodeClusterConnections) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricElasticsearchNodeClusterConnections) emit(metrics pdata.MetricSlice) { +func (m *metricElasticsearchNodeClusterConnections) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -497,14 +498,14 @@ func (m *metricElasticsearchNodeClusterConnections) emit(metrics pdata.MetricSli func newMetricElasticsearchNodeClusterConnections(settings MetricSettings) metricElasticsearchNodeClusterConnections { m := metricElasticsearchNodeClusterConnections{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchNodeClusterIo struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -514,13 +515,13 @@ func (m *metricElasticsearchNodeClusterIo) init() { m.data.SetName("elasticsearch.node.cluster.io") m.data.SetDescription("The number of bytes sent and received on the network for internal cluster communication.") m.data.SetUnit("By") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricElasticsearchNodeClusterIo) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, directionAttributeValue string) { +func (m *metricElasticsearchNodeClusterIo) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, directionAttributeValue string) { if !m.settings.Enabled { return } @@ -528,7 +529,7 @@ func (m *metricElasticsearchNodeClusterIo) recordDataPoint(start pdata.Timestamp dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.Direction, pdata.NewValueString(directionAttributeValue)) + dp.Attributes().Insert(A.Direction, pcommon.NewValueString(directionAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. 
@@ -539,7 +540,7 @@ func (m *metricElasticsearchNodeClusterIo) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricElasticsearchNodeClusterIo) emit(metrics pdata.MetricSlice) { +func (m *metricElasticsearchNodeClusterIo) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -550,14 +551,14 @@ func (m *metricElasticsearchNodeClusterIo) emit(metrics pdata.MetricSlice) { func newMetricElasticsearchNodeClusterIo(settings MetricSettings) metricElasticsearchNodeClusterIo { m := metricElasticsearchNodeClusterIo{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchNodeDocuments struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -567,13 +568,13 @@ func (m *metricElasticsearchNodeDocuments) init() { m.data.SetName("elasticsearch.node.documents") m.data.SetDescription("The number of documents on the node.") m.data.SetUnit("{documents}") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricElasticsearchNodeDocuments) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, documentStateAttributeValue string) { +func (m *metricElasticsearchNodeDocuments) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, documentStateAttributeValue string) { if !m.settings.Enabled { return } @@ -581,7 +582,7 @@ func (m *metricElasticsearchNodeDocuments) recordDataPoint(start pdata.Timestamp dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.DocumentState, pdata.NewValueString(documentStateAttributeValue)) + dp.Attributes().Insert(A.DocumentState, pcommon.NewValueString(documentStateAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -592,7 +593,7 @@ func (m *metricElasticsearchNodeDocuments) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricElasticsearchNodeDocuments) emit(metrics pdata.MetricSlice) { +func (m *metricElasticsearchNodeDocuments) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -603,14 +604,14 @@ func (m *metricElasticsearchNodeDocuments) emit(metrics pdata.MetricSlice) { func newMetricElasticsearchNodeDocuments(settings MetricSettings) metricElasticsearchNodeDocuments { m := metricElasticsearchNodeDocuments{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchNodeFsDiskAvailable struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. 
settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -620,12 +621,12 @@ func (m *metricElasticsearchNodeFsDiskAvailable) init() { m.data.SetName("elasticsearch.node.fs.disk.available") m.data.SetDescription("The amount of disk space available across all file stores for this node.") m.data.SetUnit("By") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) } -func (m *metricElasticsearchNodeFsDiskAvailable) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricElasticsearchNodeFsDiskAvailable) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -643,7 +644,7 @@ func (m *metricElasticsearchNodeFsDiskAvailable) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricElasticsearchNodeFsDiskAvailable) emit(metrics pdata.MetricSlice) { +func (m *metricElasticsearchNodeFsDiskAvailable) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -654,14 +655,14 @@ func (m *metricElasticsearchNodeFsDiskAvailable) emit(metrics pdata.MetricSlice) func newMetricElasticsearchNodeFsDiskAvailable(settings MetricSettings) metricElasticsearchNodeFsDiskAvailable { m := metricElasticsearchNodeFsDiskAvailable{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchNodeHTTPConnections struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -671,12 +672,12 @@ func (m *metricElasticsearchNodeHTTPConnections) init() { m.data.SetName("elasticsearch.node.http.connections") m.data.SetDescription("The number of HTTP connections to the node.") m.data.SetUnit("{connections}") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) } -func (m *metricElasticsearchNodeHTTPConnections) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricElasticsearchNodeHTTPConnections) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -694,7 +695,7 @@ func (m *metricElasticsearchNodeHTTPConnections) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricElasticsearchNodeHTTPConnections) emit(metrics pdata.MetricSlice) { +func (m *metricElasticsearchNodeHTTPConnections) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -705,14 +706,14 @@ func (m *metricElasticsearchNodeHTTPConnections) emit(metrics pdata.MetricSlice) func newMetricElasticsearchNodeHTTPConnections(settings MetricSettings) metricElasticsearchNodeHTTPConnections { m := metricElasticsearchNodeHTTPConnections{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchNodeOpenFiles struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -722,12 +723,12 @@ func (m *metricElasticsearchNodeOpenFiles) init() { m.data.SetName("elasticsearch.node.open_files") m.data.SetDescription("The number of open file descriptors held by the node.") m.data.SetUnit("{files}") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) } -func (m *metricElasticsearchNodeOpenFiles) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricElasticsearchNodeOpenFiles) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -745,7 +746,7 @@ func (m *metricElasticsearchNodeOpenFiles) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricElasticsearchNodeOpenFiles) emit(metrics pdata.MetricSlice) { +func (m *metricElasticsearchNodeOpenFiles) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -756,14 +757,14 @@ func (m *metricElasticsearchNodeOpenFiles) emit(metrics pdata.MetricSlice) { func newMetricElasticsearchNodeOpenFiles(settings MetricSettings) metricElasticsearchNodeOpenFiles { m := metricElasticsearchNodeOpenFiles{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchNodeOperationsCompleted struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -773,13 +774,13 @@ func (m *metricElasticsearchNodeOperationsCompleted) init() { m.data.SetName("elasticsearch.node.operations.completed") m.data.SetDescription("The number of operations completed.") m.data.SetUnit("{operations}") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricElasticsearchNodeOperationsCompleted) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, operationAttributeValue string) { +func (m *metricElasticsearchNodeOperationsCompleted) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, operationAttributeValue string) { if !m.settings.Enabled { return } @@ -787,7 +788,7 @@ func (m *metricElasticsearchNodeOperationsCompleted) recordDataPoint(start pdata dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.Operation, pdata.NewValueString(operationAttributeValue)) + dp.Attributes().Insert(A.Operation, pcommon.NewValueString(operationAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -798,7 +799,7 @@ func (m *metricElasticsearchNodeOperationsCompleted) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricElasticsearchNodeOperationsCompleted) emit(metrics pdata.MetricSlice) { +func (m *metricElasticsearchNodeOperationsCompleted) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -809,14 +810,14 @@ func (m *metricElasticsearchNodeOperationsCompleted) emit(metrics pdata.MetricSl func newMetricElasticsearchNodeOperationsCompleted(settings MetricSettings) metricElasticsearchNodeOperationsCompleted { m := metricElasticsearchNodeOperationsCompleted{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchNodeOperationsTime struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -826,13 +827,13 @@ func (m *metricElasticsearchNodeOperationsTime) init() { m.data.SetName("elasticsearch.node.operations.time") m.data.SetDescription("Time spent on operations.") m.data.SetUnit("ms") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricElasticsearchNodeOperationsTime) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, operationAttributeValue string) { +func (m *metricElasticsearchNodeOperationsTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, operationAttributeValue string) { if !m.settings.Enabled { return } @@ -840,7 +841,7 @@ func (m *metricElasticsearchNodeOperationsTime) recordDataPoint(start pdata.Time dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.Operation, pdata.NewValueString(operationAttributeValue)) + dp.Attributes().Insert(A.Operation, pcommon.NewValueString(operationAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -851,7 +852,7 @@ func (m *metricElasticsearchNodeOperationsTime) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricElasticsearchNodeOperationsTime) emit(metrics pdata.MetricSlice) { +func (m *metricElasticsearchNodeOperationsTime) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -862,14 +863,14 @@ func (m *metricElasticsearchNodeOperationsTime) emit(metrics pdata.MetricSlice) func newMetricElasticsearchNodeOperationsTime(settings MetricSettings) metricElasticsearchNodeOperationsTime { m := metricElasticsearchNodeOperationsTime{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchNodeShardsSize struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -879,12 +880,12 @@ func (m *metricElasticsearchNodeShardsSize) init() { m.data.SetName("elasticsearch.node.shards.size") m.data.SetDescription("The size of the shards assigned to this node.") m.data.SetUnit("By") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) } -func (m *metricElasticsearchNodeShardsSize) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricElasticsearchNodeShardsSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -902,7 +903,7 @@ func (m *metricElasticsearchNodeShardsSize) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricElasticsearchNodeShardsSize) emit(metrics pdata.MetricSlice) { +func (m *metricElasticsearchNodeShardsSize) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -913,14 +914,14 @@ func (m *metricElasticsearchNodeShardsSize) emit(metrics pdata.MetricSlice) { func newMetricElasticsearchNodeShardsSize(settings MetricSettings) metricElasticsearchNodeShardsSize { m := metricElasticsearchNodeShardsSize{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchNodeThreadPoolTasksFinished struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -930,13 +931,13 @@ func (m *metricElasticsearchNodeThreadPoolTasksFinished) init() { m.data.SetName("elasticsearch.node.thread_pool.tasks.finished") m.data.SetDescription("The number of tasks finished by the thread pool.") m.data.SetUnit("{tasks}") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricElasticsearchNodeThreadPoolTasksFinished) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, threadPoolNameAttributeValue string, taskStateAttributeValue string) { +func (m *metricElasticsearchNodeThreadPoolTasksFinished) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, threadPoolNameAttributeValue string, taskStateAttributeValue string) { if !m.settings.Enabled { return } @@ -944,8 +945,8 @@ func (m *metricElasticsearchNodeThreadPoolTasksFinished) recordDataPoint(start p dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.ThreadPoolName, pdata.NewValueString(threadPoolNameAttributeValue)) - dp.Attributes().Insert(A.TaskState, pdata.NewValueString(taskStateAttributeValue)) + dp.Attributes().Insert(A.ThreadPoolName, pcommon.NewValueString(threadPoolNameAttributeValue)) + dp.Attributes().Insert(A.TaskState, pcommon.NewValueString(taskStateAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -956,7 +957,7 @@ func (m *metricElasticsearchNodeThreadPoolTasksFinished) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricElasticsearchNodeThreadPoolTasksFinished) emit(metrics pdata.MetricSlice) { +func (m *metricElasticsearchNodeThreadPoolTasksFinished) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -967,14 +968,14 @@ func (m *metricElasticsearchNodeThreadPoolTasksFinished) emit(metrics pdata.Metr func newMetricElasticsearchNodeThreadPoolTasksFinished(settings MetricSettings) metricElasticsearchNodeThreadPoolTasksFinished { m := metricElasticsearchNodeThreadPoolTasksFinished{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchNodeThreadPoolTasksQueued struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -984,13 +985,13 @@ func (m *metricElasticsearchNodeThreadPoolTasksQueued) init() { m.data.SetName("elasticsearch.node.thread_pool.tasks.queued") m.data.SetDescription("The number of queued tasks in the thread pool.") m.data.SetUnit("{tasks}") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricElasticsearchNodeThreadPoolTasksQueued) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, threadPoolNameAttributeValue string) { +func (m *metricElasticsearchNodeThreadPoolTasksQueued) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, threadPoolNameAttributeValue string) { if !m.settings.Enabled { return } @@ -998,7 +999,7 @@ func (m *metricElasticsearchNodeThreadPoolTasksQueued) recordDataPoint(start pda dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.ThreadPoolName, pdata.NewValueString(threadPoolNameAttributeValue)) + dp.Attributes().Insert(A.ThreadPoolName, pcommon.NewValueString(threadPoolNameAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -1009,7 +1010,7 @@ func (m *metricElasticsearchNodeThreadPoolTasksQueued) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricElasticsearchNodeThreadPoolTasksQueued) emit(metrics pdata.MetricSlice) { +func (m *metricElasticsearchNodeThreadPoolTasksQueued) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -1020,14 +1021,14 @@ func (m *metricElasticsearchNodeThreadPoolTasksQueued) emit(metrics pdata.Metric func newMetricElasticsearchNodeThreadPoolTasksQueued(settings MetricSettings) metricElasticsearchNodeThreadPoolTasksQueued { m := metricElasticsearchNodeThreadPoolTasksQueued{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricElasticsearchNodeThreadPoolThreads struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. 
settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -1037,13 +1038,13 @@ func (m *metricElasticsearchNodeThreadPoolThreads) init() { m.data.SetName("elasticsearch.node.thread_pool.threads") m.data.SetDescription("The number of threads in the thread pool.") m.data.SetUnit("{threads}") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricElasticsearchNodeThreadPoolThreads) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, threadPoolNameAttributeValue string, threadStateAttributeValue string) { +func (m *metricElasticsearchNodeThreadPoolThreads) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, threadPoolNameAttributeValue string, threadStateAttributeValue string) { if !m.settings.Enabled { return } @@ -1051,8 +1052,8 @@ func (m *metricElasticsearchNodeThreadPoolThreads) recordDataPoint(start pdata.T dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.ThreadPoolName, pdata.NewValueString(threadPoolNameAttributeValue)) - dp.Attributes().Insert(A.ThreadState, pdata.NewValueString(threadStateAttributeValue)) + dp.Attributes().Insert(A.ThreadPoolName, pcommon.NewValueString(threadPoolNameAttributeValue)) + dp.Attributes().Insert(A.ThreadState, pcommon.NewValueString(threadStateAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -1063,7 +1064,7 @@ func (m *metricElasticsearchNodeThreadPoolThreads) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricElasticsearchNodeThreadPoolThreads) emit(metrics pdata.MetricSlice) { +func (m *metricElasticsearchNodeThreadPoolThreads) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -1074,14 +1075,14 @@ func (m *metricElasticsearchNodeThreadPoolThreads) emit(metrics pdata.MetricSlic func newMetricElasticsearchNodeThreadPoolThreads(settings MetricSettings) metricElasticsearchNodeThreadPoolThreads { m := metricElasticsearchNodeThreadPoolThreads{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricJvmClassesLoaded struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -1091,10 +1092,10 @@ func (m *metricJvmClassesLoaded) init() { m.data.SetName("jvm.classes.loaded") m.data.SetDescription("The number of loaded classes") m.data.SetUnit("1") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) } -func (m *metricJvmClassesLoaded) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricJvmClassesLoaded) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -1112,7 +1113,7 @@ func (m *metricJvmClassesLoaded) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricJvmClassesLoaded) emit(metrics pdata.MetricSlice) { +func (m *metricJvmClassesLoaded) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -1123,14 +1124,14 @@ func (m *metricJvmClassesLoaded) emit(metrics pdata.MetricSlice) { func newMetricJvmClassesLoaded(settings MetricSettings) metricJvmClassesLoaded { m := metricJvmClassesLoaded{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricJvmGcCollectionsCount struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -1140,13 +1141,13 @@ func (m *metricJvmGcCollectionsCount) init() { m.data.SetName("jvm.gc.collections.count") m.data.SetDescription("The total number of garbage collections that have occurred") m.data.SetUnit("1") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricJvmGcCollectionsCount) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, collectorNameAttributeValue string) { +func (m *metricJvmGcCollectionsCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, collectorNameAttributeValue string) { if !m.settings.Enabled { return } @@ -1154,7 +1155,7 @@ func (m *metricJvmGcCollectionsCount) recordDataPoint(start pdata.Timestamp, ts dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.CollectorName, pdata.NewValueString(collectorNameAttributeValue)) + dp.Attributes().Insert(A.CollectorName, pcommon.NewValueString(collectorNameAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -1165,7 +1166,7 @@ func (m *metricJvmGcCollectionsCount) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricJvmGcCollectionsCount) emit(metrics pdata.MetricSlice) { +func (m *metricJvmGcCollectionsCount) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -1176,14 +1177,14 @@ func (m *metricJvmGcCollectionsCount) emit(metrics pdata.MetricSlice) { func newMetricJvmGcCollectionsCount(settings MetricSettings) metricJvmGcCollectionsCount { m := metricJvmGcCollectionsCount{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricJvmGcCollectionsElapsed struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -1193,13 +1194,13 @@ func (m *metricJvmGcCollectionsElapsed) init() { m.data.SetName("jvm.gc.collections.elapsed") m.data.SetDescription("The approximate accumulated collection elapsed time") m.data.SetUnit("ms") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricJvmGcCollectionsElapsed) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, collectorNameAttributeValue string) { +func (m *metricJvmGcCollectionsElapsed) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, collectorNameAttributeValue string) { if !m.settings.Enabled { return } @@ -1207,7 +1208,7 @@ func (m *metricJvmGcCollectionsElapsed) recordDataPoint(start pdata.Timestamp, t dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.CollectorName, pdata.NewValueString(collectorNameAttributeValue)) + dp.Attributes().Insert(A.CollectorName, pcommon.NewValueString(collectorNameAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -1218,7 +1219,7 @@ func (m *metricJvmGcCollectionsElapsed) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricJvmGcCollectionsElapsed) emit(metrics pdata.MetricSlice) { +func (m *metricJvmGcCollectionsElapsed) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -1229,14 +1230,14 @@ func (m *metricJvmGcCollectionsElapsed) emit(metrics pdata.MetricSlice) { func newMetricJvmGcCollectionsElapsed(settings MetricSettings) metricJvmGcCollectionsElapsed { m := metricJvmGcCollectionsElapsed{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricJvmMemoryHeapCommitted struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -1246,10 +1247,10 @@ func (m *metricJvmMemoryHeapCommitted) init() { m.data.SetName("jvm.memory.heap.committed") m.data.SetDescription("The amount of memory that is guaranteed to be available for the heap") m.data.SetUnit("By") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) } -func (m *metricJvmMemoryHeapCommitted) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricJvmMemoryHeapCommitted) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -1267,7 +1268,7 @@ func (m *metricJvmMemoryHeapCommitted) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricJvmMemoryHeapCommitted) emit(metrics pdata.MetricSlice) { +func (m *metricJvmMemoryHeapCommitted) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -1278,14 +1279,14 @@ func (m *metricJvmMemoryHeapCommitted) emit(metrics pdata.MetricSlice) { func newMetricJvmMemoryHeapCommitted(settings MetricSettings) metricJvmMemoryHeapCommitted { m := metricJvmMemoryHeapCommitted{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricJvmMemoryHeapMax struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -1295,10 +1296,10 @@ func (m *metricJvmMemoryHeapMax) init() { m.data.SetName("jvm.memory.heap.max") m.data.SetDescription("The maximum amount of memory can be used for the heap") m.data.SetUnit("By") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) } -func (m *metricJvmMemoryHeapMax) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricJvmMemoryHeapMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -1316,7 +1317,7 @@ func (m *metricJvmMemoryHeapMax) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricJvmMemoryHeapMax) emit(metrics pdata.MetricSlice) { +func (m *metricJvmMemoryHeapMax) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -1327,14 +1328,14 @@ func (m *metricJvmMemoryHeapMax) emit(metrics pdata.MetricSlice) { func newMetricJvmMemoryHeapMax(settings MetricSettings) metricJvmMemoryHeapMax { m := metricJvmMemoryHeapMax{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricJvmMemoryHeapUsed struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -1344,10 +1345,10 @@ func (m *metricJvmMemoryHeapUsed) init() { m.data.SetName("jvm.memory.heap.used") m.data.SetDescription("The current heap memory usage") m.data.SetUnit("By") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) } -func (m *metricJvmMemoryHeapUsed) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricJvmMemoryHeapUsed) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -1365,7 +1366,7 @@ func (m *metricJvmMemoryHeapUsed) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricJvmMemoryHeapUsed) emit(metrics pdata.MetricSlice) { +func (m *metricJvmMemoryHeapUsed) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -1376,14 +1377,14 @@ func (m *metricJvmMemoryHeapUsed) emit(metrics pdata.MetricSlice) { func newMetricJvmMemoryHeapUsed(settings MetricSettings) metricJvmMemoryHeapUsed { m := metricJvmMemoryHeapUsed{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricJvmMemoryNonheapCommitted struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -1393,10 +1394,10 @@ func (m *metricJvmMemoryNonheapCommitted) init() { m.data.SetName("jvm.memory.nonheap.committed") m.data.SetDescription("The amount of memory that is guaranteed to be available for non-heap purposes") m.data.SetUnit("By") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) } -func (m *metricJvmMemoryNonheapCommitted) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricJvmMemoryNonheapCommitted) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -1414,7 +1415,7 @@ func (m *metricJvmMemoryNonheapCommitted) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricJvmMemoryNonheapCommitted) emit(metrics pdata.MetricSlice) { +func (m *metricJvmMemoryNonheapCommitted) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -1425,14 +1426,14 @@ func (m *metricJvmMemoryNonheapCommitted) emit(metrics pdata.MetricSlice) { func newMetricJvmMemoryNonheapCommitted(settings MetricSettings) metricJvmMemoryNonheapCommitted { m := metricJvmMemoryNonheapCommitted{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricJvmMemoryNonheapUsed struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -1442,10 +1443,10 @@ func (m *metricJvmMemoryNonheapUsed) init() { m.data.SetName("jvm.memory.nonheap.used") m.data.SetDescription("The current non-heap memory usage") m.data.SetUnit("By") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) } -func (m *metricJvmMemoryNonheapUsed) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricJvmMemoryNonheapUsed) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -1463,7 +1464,7 @@ func (m *metricJvmMemoryNonheapUsed) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricJvmMemoryNonheapUsed) emit(metrics pdata.MetricSlice) { +func (m *metricJvmMemoryNonheapUsed) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -1474,14 +1475,14 @@ func (m *metricJvmMemoryNonheapUsed) emit(metrics pdata.MetricSlice) { func newMetricJvmMemoryNonheapUsed(settings MetricSettings) metricJvmMemoryNonheapUsed { m := metricJvmMemoryNonheapUsed{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricJvmMemoryPoolMax struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -1491,11 +1492,11 @@ func (m *metricJvmMemoryPoolMax) init() { m.data.SetName("jvm.memory.pool.max") m.data.SetDescription("The maximum amount of memory can be used for the memory pool") m.data.SetUnit("By") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricJvmMemoryPoolMax) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, memoryPoolNameAttributeValue string) { +func (m *metricJvmMemoryPoolMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, memoryPoolNameAttributeValue string) { if !m.settings.Enabled { return } @@ -1503,7 +1504,7 @@ func (m *metricJvmMemoryPoolMax) recordDataPoint(start pdata.Timestamp, ts pdata dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.MemoryPoolName, pdata.NewValueString(memoryPoolNameAttributeValue)) + dp.Attributes().Insert(A.MemoryPoolName, pcommon.NewValueString(memoryPoolNameAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -1514,7 +1515,7 @@ func (m *metricJvmMemoryPoolMax) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricJvmMemoryPoolMax) emit(metrics pdata.MetricSlice) { +func (m *metricJvmMemoryPoolMax) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -1525,14 +1526,14 @@ func (m *metricJvmMemoryPoolMax) emit(metrics pdata.MetricSlice) { func newMetricJvmMemoryPoolMax(settings MetricSettings) metricJvmMemoryPoolMax { m := metricJvmMemoryPoolMax{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricJvmMemoryPoolUsed struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -1542,11 +1543,11 @@ func (m *metricJvmMemoryPoolUsed) init() { m.data.SetName("jvm.memory.pool.used") m.data.SetDescription("The current memory pool memory usage") m.data.SetUnit("By") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricJvmMemoryPoolUsed) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, memoryPoolNameAttributeValue string) { +func (m *metricJvmMemoryPoolUsed) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, memoryPoolNameAttributeValue string) { if !m.settings.Enabled { return } @@ -1554,7 +1555,7 @@ func (m *metricJvmMemoryPoolUsed) recordDataPoint(start pdata.Timestamp, ts pdat dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.MemoryPoolName, pdata.NewValueString(memoryPoolNameAttributeValue)) + dp.Attributes().Insert(A.MemoryPoolName, pcommon.NewValueString(memoryPoolNameAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -1565,7 +1566,7 @@ func (m *metricJvmMemoryPoolUsed) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricJvmMemoryPoolUsed) emit(metrics pdata.MetricSlice) { +func (m *metricJvmMemoryPoolUsed) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -1576,14 +1577,14 @@ func (m *metricJvmMemoryPoolUsed) emit(metrics pdata.MetricSlice) { func newMetricJvmMemoryPoolUsed(settings MetricSettings) metricJvmMemoryPoolUsed { m := metricJvmMemoryPoolUsed{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricJvmThreadsCount struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -1593,10 +1594,10 @@ func (m *metricJvmThreadsCount) init() { m.data.SetName("jvm.threads.count") m.data.SetDescription("The current number of threads") m.data.SetUnit("1") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) } -func (m *metricJvmThreadsCount) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricJvmThreadsCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -1614,7 +1615,7 @@ func (m *metricJvmThreadsCount) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricJvmThreadsCount) emit(metrics pdata.MetricSlice) { +func (m *metricJvmThreadsCount) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -1625,7 +1626,7 @@ func (m *metricJvmThreadsCount) emit(metrics pdata.MetricSlice) { func newMetricJvmThreadsCount(settings MetricSettings) metricJvmThreadsCount { m := metricJvmThreadsCount{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m @@ -1634,10 +1635,10 @@ func newMetricJvmThreadsCount(settings MetricSettings) metricJvmThreadsCount { // MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations // required to produce metric representation defined in metadata and user settings. type MetricsBuilder struct { - startTime pdata.Timestamp // start time that will be applied to all recorded data points. - metricsCapacity int // maximum observed number of metrics per resource. - resourceCapacity int // maximum observed number of resource attributes. - metricsBuffer pdata.Metrics // accumulates metrics data before emitting. + startTime pcommon.Timestamp // start time that will be applied to all recorded data points. + metricsCapacity int // maximum observed number of metrics per resource. + resourceCapacity int // maximum observed number of resource attributes. + metricsBuffer pmetric.Metrics // accumulates metrics data before emitting. metricElasticsearchClusterDataNodes metricElasticsearchClusterDataNodes metricElasticsearchClusterHealth metricElasticsearchClusterHealth metricElasticsearchClusterNodes metricElasticsearchClusterNodes @@ -1673,7 +1674,7 @@ type MetricsBuilder struct { type metricBuilderOption func(*MetricsBuilder) // WithStartTime sets startTime on the metrics builder. 
-func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { +func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption { return func(mb *MetricsBuilder) { mb.startTime = startTime } @@ -1681,8 +1682,8 @@ func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ - startTime: pdata.NewTimestampFromTime(time.Now()), - metricsBuffer: pdata.NewMetrics(), + startTime: pcommon.NewTimestampFromTime(time.Now()), + metricsBuffer: pmetric.NewMetrics(), metricElasticsearchClusterDataNodes: newMetricElasticsearchClusterDataNodes(settings.ElasticsearchClusterDataNodes), metricElasticsearchClusterHealth: newMetricElasticsearchClusterHealth(settings.ElasticsearchClusterHealth), metricElasticsearchClusterNodes: newMetricElasticsearchClusterNodes(settings.ElasticsearchClusterNodes), @@ -1720,7 +1721,7 @@ func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) } // updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity. -func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { +func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) { if mb.metricsCapacity < rm.ScopeMetrics().At(0).Metrics().Len() { mb.metricsCapacity = rm.ScopeMetrics().At(0).Metrics().Len() } @@ -1730,14 +1731,14 @@ func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { } // ResourceOption applies changes to provided resource. -type ResourceOption func(pdata.Resource) +type ResourceOption func(pcommon.Resource) // EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for // recording another set of data points as part of another resource. This function can be helpful when one scraper // needs to emit metrics from several resources. Otherwise calling this function is not required, // just `Emit` function can be called instead. Resource attributes should be provided as ResourceOption arguments. func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { - rm := pdata.NewResourceMetrics() + rm := pmetric.NewResourceMetrics() rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity) for _, op := range ro { op(rm.Resource()) @@ -1783,162 +1784,162 @@ func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { // Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for // recording another set of metrics. This function will be responsible for applying all the transformations required to // produce metric representation defined in metadata and user settings, e.g. delta or cumulative. -func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pdata.Metrics { +func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pmetric.Metrics { mb.EmitForResource(ro...) - metrics := pdata.NewMetrics() + metrics := pmetric.NewMetrics() mb.metricsBuffer.MoveTo(metrics) return metrics } // RecordElasticsearchClusterDataNodesDataPoint adds a data point to elasticsearch.cluster.data_nodes metric. 
-func (mb *MetricsBuilder) RecordElasticsearchClusterDataNodesDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordElasticsearchClusterDataNodesDataPoint(ts pcommon.Timestamp, val int64) { mb.metricElasticsearchClusterDataNodes.recordDataPoint(mb.startTime, ts, val) } // RecordElasticsearchClusterHealthDataPoint adds a data point to elasticsearch.cluster.health metric. -func (mb *MetricsBuilder) RecordElasticsearchClusterHealthDataPoint(ts pdata.Timestamp, val int64, healthStatusAttributeValue string) { +func (mb *MetricsBuilder) RecordElasticsearchClusterHealthDataPoint(ts pcommon.Timestamp, val int64, healthStatusAttributeValue string) { mb.metricElasticsearchClusterHealth.recordDataPoint(mb.startTime, ts, val, healthStatusAttributeValue) } // RecordElasticsearchClusterNodesDataPoint adds a data point to elasticsearch.cluster.nodes metric. -func (mb *MetricsBuilder) RecordElasticsearchClusterNodesDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordElasticsearchClusterNodesDataPoint(ts pcommon.Timestamp, val int64) { mb.metricElasticsearchClusterNodes.recordDataPoint(mb.startTime, ts, val) } // RecordElasticsearchClusterShardsDataPoint adds a data point to elasticsearch.cluster.shards metric. -func (mb *MetricsBuilder) RecordElasticsearchClusterShardsDataPoint(ts pdata.Timestamp, val int64, shardStateAttributeValue string) { +func (mb *MetricsBuilder) RecordElasticsearchClusterShardsDataPoint(ts pcommon.Timestamp, val int64, shardStateAttributeValue string) { mb.metricElasticsearchClusterShards.recordDataPoint(mb.startTime, ts, val, shardStateAttributeValue) } // RecordElasticsearchNodeCacheEvictionsDataPoint adds a data point to elasticsearch.node.cache.evictions metric. -func (mb *MetricsBuilder) RecordElasticsearchNodeCacheEvictionsDataPoint(ts pdata.Timestamp, val int64, cacheNameAttributeValue string) { +func (mb *MetricsBuilder) RecordElasticsearchNodeCacheEvictionsDataPoint(ts pcommon.Timestamp, val int64, cacheNameAttributeValue string) { mb.metricElasticsearchNodeCacheEvictions.recordDataPoint(mb.startTime, ts, val, cacheNameAttributeValue) } // RecordElasticsearchNodeCacheMemoryUsageDataPoint adds a data point to elasticsearch.node.cache.memory.usage metric. -func (mb *MetricsBuilder) RecordElasticsearchNodeCacheMemoryUsageDataPoint(ts pdata.Timestamp, val int64, cacheNameAttributeValue string) { +func (mb *MetricsBuilder) RecordElasticsearchNodeCacheMemoryUsageDataPoint(ts pcommon.Timestamp, val int64, cacheNameAttributeValue string) { mb.metricElasticsearchNodeCacheMemoryUsage.recordDataPoint(mb.startTime, ts, val, cacheNameAttributeValue) } // RecordElasticsearchNodeClusterConnectionsDataPoint adds a data point to elasticsearch.node.cluster.connections metric. -func (mb *MetricsBuilder) RecordElasticsearchNodeClusterConnectionsDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordElasticsearchNodeClusterConnectionsDataPoint(ts pcommon.Timestamp, val int64) { mb.metricElasticsearchNodeClusterConnections.recordDataPoint(mb.startTime, ts, val) } // RecordElasticsearchNodeClusterIoDataPoint adds a data point to elasticsearch.node.cluster.io metric. 
-func (mb *MetricsBuilder) RecordElasticsearchNodeClusterIoDataPoint(ts pdata.Timestamp, val int64, directionAttributeValue string) { +func (mb *MetricsBuilder) RecordElasticsearchNodeClusterIoDataPoint(ts pcommon.Timestamp, val int64, directionAttributeValue string) { mb.metricElasticsearchNodeClusterIo.recordDataPoint(mb.startTime, ts, val, directionAttributeValue) } // RecordElasticsearchNodeDocumentsDataPoint adds a data point to elasticsearch.node.documents metric. -func (mb *MetricsBuilder) RecordElasticsearchNodeDocumentsDataPoint(ts pdata.Timestamp, val int64, documentStateAttributeValue string) { +func (mb *MetricsBuilder) RecordElasticsearchNodeDocumentsDataPoint(ts pcommon.Timestamp, val int64, documentStateAttributeValue string) { mb.metricElasticsearchNodeDocuments.recordDataPoint(mb.startTime, ts, val, documentStateAttributeValue) } // RecordElasticsearchNodeFsDiskAvailableDataPoint adds a data point to elasticsearch.node.fs.disk.available metric. -func (mb *MetricsBuilder) RecordElasticsearchNodeFsDiskAvailableDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordElasticsearchNodeFsDiskAvailableDataPoint(ts pcommon.Timestamp, val int64) { mb.metricElasticsearchNodeFsDiskAvailable.recordDataPoint(mb.startTime, ts, val) } // RecordElasticsearchNodeHTTPConnectionsDataPoint adds a data point to elasticsearch.node.http.connections metric. -func (mb *MetricsBuilder) RecordElasticsearchNodeHTTPConnectionsDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordElasticsearchNodeHTTPConnectionsDataPoint(ts pcommon.Timestamp, val int64) { mb.metricElasticsearchNodeHTTPConnections.recordDataPoint(mb.startTime, ts, val) } // RecordElasticsearchNodeOpenFilesDataPoint adds a data point to elasticsearch.node.open_files metric. -func (mb *MetricsBuilder) RecordElasticsearchNodeOpenFilesDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordElasticsearchNodeOpenFilesDataPoint(ts pcommon.Timestamp, val int64) { mb.metricElasticsearchNodeOpenFiles.recordDataPoint(mb.startTime, ts, val) } // RecordElasticsearchNodeOperationsCompletedDataPoint adds a data point to elasticsearch.node.operations.completed metric. -func (mb *MetricsBuilder) RecordElasticsearchNodeOperationsCompletedDataPoint(ts pdata.Timestamp, val int64, operationAttributeValue string) { +func (mb *MetricsBuilder) RecordElasticsearchNodeOperationsCompletedDataPoint(ts pcommon.Timestamp, val int64, operationAttributeValue string) { mb.metricElasticsearchNodeOperationsCompleted.recordDataPoint(mb.startTime, ts, val, operationAttributeValue) } // RecordElasticsearchNodeOperationsTimeDataPoint adds a data point to elasticsearch.node.operations.time metric. -func (mb *MetricsBuilder) RecordElasticsearchNodeOperationsTimeDataPoint(ts pdata.Timestamp, val int64, operationAttributeValue string) { +func (mb *MetricsBuilder) RecordElasticsearchNodeOperationsTimeDataPoint(ts pcommon.Timestamp, val int64, operationAttributeValue string) { mb.metricElasticsearchNodeOperationsTime.recordDataPoint(mb.startTime, ts, val, operationAttributeValue) } // RecordElasticsearchNodeShardsSizeDataPoint adds a data point to elasticsearch.node.shards.size metric. 
-func (mb *MetricsBuilder) RecordElasticsearchNodeShardsSizeDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordElasticsearchNodeShardsSizeDataPoint(ts pcommon.Timestamp, val int64) { mb.metricElasticsearchNodeShardsSize.recordDataPoint(mb.startTime, ts, val) } // RecordElasticsearchNodeThreadPoolTasksFinishedDataPoint adds a data point to elasticsearch.node.thread_pool.tasks.finished metric. -func (mb *MetricsBuilder) RecordElasticsearchNodeThreadPoolTasksFinishedDataPoint(ts pdata.Timestamp, val int64, threadPoolNameAttributeValue string, taskStateAttributeValue string) { +func (mb *MetricsBuilder) RecordElasticsearchNodeThreadPoolTasksFinishedDataPoint(ts pcommon.Timestamp, val int64, threadPoolNameAttributeValue string, taskStateAttributeValue string) { mb.metricElasticsearchNodeThreadPoolTasksFinished.recordDataPoint(mb.startTime, ts, val, threadPoolNameAttributeValue, taskStateAttributeValue) } // RecordElasticsearchNodeThreadPoolTasksQueuedDataPoint adds a data point to elasticsearch.node.thread_pool.tasks.queued metric. -func (mb *MetricsBuilder) RecordElasticsearchNodeThreadPoolTasksQueuedDataPoint(ts pdata.Timestamp, val int64, threadPoolNameAttributeValue string) { +func (mb *MetricsBuilder) RecordElasticsearchNodeThreadPoolTasksQueuedDataPoint(ts pcommon.Timestamp, val int64, threadPoolNameAttributeValue string) { mb.metricElasticsearchNodeThreadPoolTasksQueued.recordDataPoint(mb.startTime, ts, val, threadPoolNameAttributeValue) } // RecordElasticsearchNodeThreadPoolThreadsDataPoint adds a data point to elasticsearch.node.thread_pool.threads metric. -func (mb *MetricsBuilder) RecordElasticsearchNodeThreadPoolThreadsDataPoint(ts pdata.Timestamp, val int64, threadPoolNameAttributeValue string, threadStateAttributeValue string) { +func (mb *MetricsBuilder) RecordElasticsearchNodeThreadPoolThreadsDataPoint(ts pcommon.Timestamp, val int64, threadPoolNameAttributeValue string, threadStateAttributeValue string) { mb.metricElasticsearchNodeThreadPoolThreads.recordDataPoint(mb.startTime, ts, val, threadPoolNameAttributeValue, threadStateAttributeValue) } // RecordJvmClassesLoadedDataPoint adds a data point to jvm.classes.loaded metric. -func (mb *MetricsBuilder) RecordJvmClassesLoadedDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordJvmClassesLoadedDataPoint(ts pcommon.Timestamp, val int64) { mb.metricJvmClassesLoaded.recordDataPoint(mb.startTime, ts, val) } // RecordJvmGcCollectionsCountDataPoint adds a data point to jvm.gc.collections.count metric. -func (mb *MetricsBuilder) RecordJvmGcCollectionsCountDataPoint(ts pdata.Timestamp, val int64, collectorNameAttributeValue string) { +func (mb *MetricsBuilder) RecordJvmGcCollectionsCountDataPoint(ts pcommon.Timestamp, val int64, collectorNameAttributeValue string) { mb.metricJvmGcCollectionsCount.recordDataPoint(mb.startTime, ts, val, collectorNameAttributeValue) } // RecordJvmGcCollectionsElapsedDataPoint adds a data point to jvm.gc.collections.elapsed metric. -func (mb *MetricsBuilder) RecordJvmGcCollectionsElapsedDataPoint(ts pdata.Timestamp, val int64, collectorNameAttributeValue string) { +func (mb *MetricsBuilder) RecordJvmGcCollectionsElapsedDataPoint(ts pcommon.Timestamp, val int64, collectorNameAttributeValue string) { mb.metricJvmGcCollectionsElapsed.recordDataPoint(mb.startTime, ts, val, collectorNameAttributeValue) } // RecordJvmMemoryHeapCommittedDataPoint adds a data point to jvm.memory.heap.committed metric. 
-func (mb *MetricsBuilder) RecordJvmMemoryHeapCommittedDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordJvmMemoryHeapCommittedDataPoint(ts pcommon.Timestamp, val int64) { mb.metricJvmMemoryHeapCommitted.recordDataPoint(mb.startTime, ts, val) } // RecordJvmMemoryHeapMaxDataPoint adds a data point to jvm.memory.heap.max metric. -func (mb *MetricsBuilder) RecordJvmMemoryHeapMaxDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordJvmMemoryHeapMaxDataPoint(ts pcommon.Timestamp, val int64) { mb.metricJvmMemoryHeapMax.recordDataPoint(mb.startTime, ts, val) } // RecordJvmMemoryHeapUsedDataPoint adds a data point to jvm.memory.heap.used metric. -func (mb *MetricsBuilder) RecordJvmMemoryHeapUsedDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordJvmMemoryHeapUsedDataPoint(ts pcommon.Timestamp, val int64) { mb.metricJvmMemoryHeapUsed.recordDataPoint(mb.startTime, ts, val) } // RecordJvmMemoryNonheapCommittedDataPoint adds a data point to jvm.memory.nonheap.committed metric. -func (mb *MetricsBuilder) RecordJvmMemoryNonheapCommittedDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordJvmMemoryNonheapCommittedDataPoint(ts pcommon.Timestamp, val int64) { mb.metricJvmMemoryNonheapCommitted.recordDataPoint(mb.startTime, ts, val) } // RecordJvmMemoryNonheapUsedDataPoint adds a data point to jvm.memory.nonheap.used metric. -func (mb *MetricsBuilder) RecordJvmMemoryNonheapUsedDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordJvmMemoryNonheapUsedDataPoint(ts pcommon.Timestamp, val int64) { mb.metricJvmMemoryNonheapUsed.recordDataPoint(mb.startTime, ts, val) } // RecordJvmMemoryPoolMaxDataPoint adds a data point to jvm.memory.pool.max metric. -func (mb *MetricsBuilder) RecordJvmMemoryPoolMaxDataPoint(ts pdata.Timestamp, val int64, memoryPoolNameAttributeValue string) { +func (mb *MetricsBuilder) RecordJvmMemoryPoolMaxDataPoint(ts pcommon.Timestamp, val int64, memoryPoolNameAttributeValue string) { mb.metricJvmMemoryPoolMax.recordDataPoint(mb.startTime, ts, val, memoryPoolNameAttributeValue) } // RecordJvmMemoryPoolUsedDataPoint adds a data point to jvm.memory.pool.used metric. -func (mb *MetricsBuilder) RecordJvmMemoryPoolUsedDataPoint(ts pdata.Timestamp, val int64, memoryPoolNameAttributeValue string) { +func (mb *MetricsBuilder) RecordJvmMemoryPoolUsedDataPoint(ts pcommon.Timestamp, val int64, memoryPoolNameAttributeValue string) { mb.metricJvmMemoryPoolUsed.recordDataPoint(mb.startTime, ts, val, memoryPoolNameAttributeValue) } // RecordJvmThreadsCountDataPoint adds a data point to jvm.threads.count metric. -func (mb *MetricsBuilder) RecordJvmThreadsCountDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordJvmThreadsCountDataPoint(ts pcommon.Timestamp, val int64) { mb.metricJvmThreadsCount.recordDataPoint(mb.startTime, ts, val) } // Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted, // and metrics builder should update its startTime and reset it's internal state accordingly. 
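For reference, a minimal sketch of how a scraper drives this generated builder after the pdata split, using only names visible in the hunks above (NewMetricsBuilder, WithStartTime, the Record*DataPoint helpers, Emit). The function name and the settings parameter are placeholders, and in practice this code lives inside the receiver package because the metadata package is internal:

package example // illustrative only

import (
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"

	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/elasticsearchreceiver/internal/metadata"
)

// buildExampleMetrics records a single data point and emits the accumulated batch.
func buildExampleMetrics(settings metadata.MetricsSettings) pmetric.Metrics {
	mb := metadata.NewMetricsBuilder(settings,
		metadata.WithStartTime(pcommon.NewTimestampFromTime(time.Now())))
	now := pcommon.NewTimestampFromTime(time.Now())
	// One observation of elasticsearch.cluster.data_nodes.
	mb.RecordElasticsearchClusterDataNodesDataPoint(now, 3)
	// Emit moves everything recorded so far into a fresh pmetric.Metrics.
	return mb.Emit()
}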
func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { - mb.startTime = pdata.NewTimestampFromTime(time.Now()) + mb.startTime = pcommon.NewTimestampFromTime(time.Now()) for _, op := range options { op(mb) } diff --git a/receiver/elasticsearchreceiver/scraper.go b/receiver/elasticsearchreceiver/scraper.go index 424162a82916..684038236063 100644 --- a/receiver/elasticsearchreceiver/scraper.go +++ b/receiver/elasticsearchreceiver/scraper.go @@ -21,7 +21,8 @@ import ( "time" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/receiver/scrapererror" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/elasticsearchreceiver/internal/metadata" @@ -54,13 +55,13 @@ func (r *elasticsearchScraper) start(_ context.Context, host component.Host) (er return } -func (r *elasticsearchScraper) scrape(ctx context.Context) (pdata.Metrics, error) { - metrics := pdata.NewMetrics() +func (r *elasticsearchScraper) scrape(ctx context.Context) (pmetric.Metrics, error) { + metrics := pmetric.NewMetrics() rms := metrics.ResourceMetrics() errs := &scrapererror.ScrapeErrors{} - now := pdata.NewTimestampFromTime(time.Now()) + now := pcommon.NewTimestampFromTime(time.Now()) r.scrapeNodeMetrics(ctx, now, rms, errs) r.scrapeClusterMetrics(ctx, now, rms, errs) @@ -69,7 +70,7 @@ func (r *elasticsearchScraper) scrape(ctx context.Context) (pdata.Metrics, error } // scrapeNodeMetrics scrapes adds node-level metrics to the given MetricSlice from the NodeStats endpoint -func (r *elasticsearchScraper) scrapeNodeMetrics(ctx context.Context, now pdata.Timestamp, rms pdata.ResourceMetricsSlice, errs *scrapererror.ScrapeErrors) { +func (r *elasticsearchScraper) scrapeNodeMetrics(ctx context.Context, now pcommon.Timestamp, rms pmetric.ResourceMetricsSlice, errs *scrapererror.ScrapeErrors) { if len(r.cfg.Nodes) == 0 { return } @@ -174,7 +175,7 @@ func (r *elasticsearchScraper) scrapeNodeMetrics(ctx context.Context, now pdata. } } -func (r *elasticsearchScraper) scrapeClusterMetrics(ctx context.Context, now pdata.Timestamp, rms pdata.ResourceMetricsSlice, errs *scrapererror.ScrapeErrors) { +func (r *elasticsearchScraper) scrapeClusterMetrics(ctx context.Context, now pcommon.Timestamp, rms pmetric.ResourceMetricsSlice, errs *scrapererror.ScrapeErrors) { if r.cfg.SkipClusterMetrics { return } diff --git a/receiver/filelogreceiver/README.md b/receiver/filelogreceiver/README.md index 22be50b60750..fc7d53f02c14 100644 --- a/receiver/filelogreceiver/README.md +++ b/receiver/filelogreceiver/README.md @@ -27,7 +27,7 @@ Supported pipeline types: logs | `attributes` | {} | A map of `key: value` pairs to add to the entry's attributes | | `resource` | {} | A map of `key: value` pairs to add to the entry's resource | | `operators` | [] | An array of [operators](https://github.com/open-telemetry/opentelemetry-log-collection/blob/main/docs/operators/README.md#what-operators-are-available). See below for more details | -| `converter` |
{ max_flush_count: 100, flush_interval: 100ms, worker_count: max(1,runtime.NumCPU()/4) }
| A map of `key: value` pairs to configure the [`entry.Entry`][entry_link] to [`pdata.LogRecord`][pdata_logrecord_link] converter, more info can be found [here][converter_link] | +| `converter` |
{ max_flush_count: 100, flush_interval: 100ms, worker_count: max(1,runtime.NumCPU()/4) }
| A map of `key: value` pairs to configure the [`entry.Entry`][entry_link] to [`plog.LogRecord`][pdata_logrecord_link] converter, more info can be found [here][converter_link] | [entry_link]: https://github.com/open-telemetry/opentelemetry-log-collection/blob/v0.23.0/entry/entry.go#L43-L54 [pdata_logrecord_link]: https://github.com/open-telemetry/opentelemetry-collector/blob/v0.40.0/model/pdata/generated_log.go#L553-L564 diff --git a/receiver/filelogreceiver/filelog_test.go b/receiver/filelogreceiver/filelog_test.go index 21c0714fc84a..ff371b2c85cd 100644 --- a/receiver/filelogreceiver/filelog_test.go +++ b/receiver/filelogreceiver/filelog_test.go @@ -33,7 +33,7 @@ import ( "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/config/configtest" "go.opentelemetry.io/collector/consumer/consumertest" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/service/servicetest" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/stanza" @@ -221,7 +221,7 @@ func (rt *rotationTest) Run(t *testing.T) { converter.Stop() } -func consumeNLogsFromConverter(ch <-chan pdata.Logs, count int, wg *sync.WaitGroup) { +func consumeNLogsFromConverter(ch <-chan plog.Logs, count int, wg *sync.WaitGroup) { defer wg.Done() n := 0 diff --git a/receiver/filelogreceiver/go.mod b/receiver/filelogreceiver/go.mod index ac2a34cf8dae..76a9ba9d873a 100644 --- a/receiver/filelogreceiver/go.mod +++ b/receiver/filelogreceiver/go.mod @@ -8,8 +8,8 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/stanza v0.48.0 github.com/open-telemetry/opentelemetry-log-collection v0.29.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 gopkg.in/yaml.v2 v2.4.0 ) @@ -20,7 +20,7 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect @@ -28,7 +28,6 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/observiq/ctimefmt v1.0.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spf13/cast v1.4.1 // indirect go.etcd.io/bbolt v1.3.6 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect @@ -46,3 +45,5 @@ require ( replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/stanza => ../../internal/stanza replace github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage => ../../extension/storage + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/receiver/filelogreceiver/go.sum b/receiver/filelogreceiver/go.sum index 7a3854e5a4fa..19e362aae272 100644 --- a/receiver/filelogreceiver/go.sum +++ b/receiver/filelogreceiver/go.sum @@ -22,7 +22,7 @@ github.com/benbjohnson/clock v1.3.0/go.mod 
h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZx github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bmatcuk/doublestar/v3 v3.0.0 h1:TQtVPlDnAYwcrVNB2JiGuMc++H5qzWZd9PhkNo5WyHI= github.com/bmatcuk/doublestar/v3 v3.0.0/go.mod h1:6PcTVMw80pCY1RVuoqu3V++99uQB3vsSYKPTd8AWA0k= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= @@ -79,7 +79,6 @@ github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -111,8 +110,8 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -172,8 +171,6 @@ github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/sanity-io/litter v1.2.0/go.mod h1:JF6pZUFgu2Q0sBZ+HSV35P8TVPI1TTzEwyu9FXAw2W4= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -193,17 +190,17 
@@ go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= diff --git a/receiver/filelogreceiver/storage_test.go b/receiver/filelogreceiver/storage_test.go index 673f35848ded..383ee4cea024 100644 --- a/receiver/filelogreceiver/storage_test.go +++ b/receiver/filelogreceiver/storage_test.go @@ -172,7 +172,7 @@ func (l *recallLogger) recall() []string { return l.written } -// TODO use stateless Convert() from #3125 to generate exact pdata.Logs +// TODO use stateless Convert() from #3125 to generate exact plog.Logs // for now, just validate body func expectLogs(sink *consumertest.LogsSink, expected []string) func() bool { return func() bool { diff --git a/receiver/fluentforwardreceiver/collector.go b/receiver/fluentforwardreceiver/collector.go index 60fb879c3069..9c243001d27b 100644 --- a/receiver/fluentforwardreceiver/collector.go +++ b/receiver/fluentforwardreceiver/collector.go @@ -19,14 +19,14 @@ import ( "go.opencensus.io/stats" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/plog" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/fluentforwardreceiver/observ" ) // Collector acts as an aggregator of LogRecords so that we don't have to -// generate as many pdata.Logs instances...we can pre-batch the LogRecord +// generate as many plog.Logs instances...we can pre-batch the 
LogRecord // instances from several Forward events into one to hopefully reduce // allocations and GC overhead. type Collector struct { @@ -75,8 +75,8 @@ func fillBufferUntilChanEmpty(eventCh <-chan Event, buf []Event) []Event { } } -func collectLogRecords(events []Event) pdata.Logs { - out := pdata.NewLogs() +func collectLogRecords(events []Event) plog.Logs { + out := plog.NewLogs() rls := out.ResourceLogs().AppendEmpty() logSlice := rls.ScopeLogs().AppendEmpty().LogRecords() for i := range events { diff --git a/receiver/fluentforwardreceiver/conversion.go b/receiver/fluentforwardreceiver/conversion.go index 6bcdaa5ae55d..5140667342c4 100644 --- a/receiver/fluentforwardreceiver/conversion.go +++ b/receiver/fluentforwardreceiver/conversion.go @@ -22,7 +22,8 @@ import ( "time" "github.com/tinylib/msgp/msgp" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" ) const tagAttributeKey = "fluent.tag" @@ -33,7 +34,7 @@ const tagAttributeKey = "fluent.tag" type Event interface { DecodeMsg(dc *msgp.Reader) error - LogRecords() pdata.LogRecordSlice + LogRecords() plog.LogRecordSlice Chunk() string Compressed() string } @@ -82,8 +83,8 @@ func (em EventMode) String() string { // parseInterfaceToMap takes map of interface objects and returns // AttributeValueMap -func parseInterfaceToMap(msi map[string]interface{}) pdata.Value { - rv := pdata.NewValueMap() +func parseInterfaceToMap(msi map[string]interface{}) pcommon.Value { + rv := pcommon.NewValueMap() am := rv.MapVal() am.EnsureCapacity(len(msi)) for k, value := range msi { @@ -94,8 +95,8 @@ func parseInterfaceToMap(msi map[string]interface{}) pdata.Value { // parseInterfaceToArray takes array of interface objects and returns // AttributeValueArray -func parseInterfaceToArray(ai []interface{}) pdata.Value { - iv := pdata.NewValueSlice() +func parseInterfaceToArray(ai []interface{}) pcommon.Value { + iv := pcommon.NewValueSlice() av := iv.SliceVal() av.EnsureCapacity(len(ai)) for _, value := range ai { @@ -105,32 +106,32 @@ func parseInterfaceToArray(ai []interface{}) pdata.Value { } // parseToAttributeValue converts interface object to AttributeValue -func parseToAttributeValue(val interface{}) pdata.Value { +func parseToAttributeValue(val interface{}) pcommon.Value { // See https://github.com/tinylib/msgp/wiki/Type-Mapping-Rules switch r := val.(type) { case bool: - return pdata.NewValueBool(r) + return pcommon.NewValueBool(r) case string: - return pdata.NewValueString(r) + return pcommon.NewValueString(r) case uint64: - return pdata.NewValueInt(int64(r)) + return pcommon.NewValueInt(int64(r)) case int64: - return pdata.NewValueInt(r) + return pcommon.NewValueInt(r) // Sometimes strings come in as bytes array case []byte: - return pdata.NewValueString(string(r)) + return pcommon.NewValueString(string(r)) case map[string]interface{}: return parseInterfaceToMap(r) case []interface{}: return parseInterfaceToArray(r) case float32: - return pdata.NewValueDouble(float64(r)) + return pcommon.NewValueDouble(float64(r)) case float64: - return pdata.NewValueDouble(r) + return pcommon.NewValueDouble(r) case nil: - return pdata.NewValueEmpty() + return pcommon.NewValueEmpty() default: - return pdata.NewValueString(fmt.Sprintf("%v", val)) + return pcommon.NewValueString(fmt.Sprintf("%v", val)) } } @@ -145,7 +146,7 @@ func timeFromTimestamp(ts interface{}) (time.Time, error) { } } -func decodeTimestampToLogRecord(dc *msgp.Reader, lr pdata.LogRecord) error { 
+func decodeTimestampToLogRecord(dc *msgp.Reader, lr plog.LogRecord) error { tsIntf, err := dc.ReadIntf() if err != nil { return msgp.WrapError(err, "Time") @@ -156,11 +157,11 @@ func decodeTimestampToLogRecord(dc *msgp.Reader, lr pdata.LogRecord) error { return msgp.WrapError(err, "Time") } - lr.SetTimestamp(pdata.NewTimestampFromTime(ts)) + lr.SetTimestamp(pcommon.NewTimestampFromTime(ts)) return nil } -func parseRecordToLogRecord(dc *msgp.Reader, lr pdata.LogRecord) error { +func parseRecordToLogRecord(dc *msgp.Reader, lr plog.LogRecord) error { attrs := lr.Attributes() recordLen, err := dc.ReadMapHeader() @@ -199,16 +200,16 @@ func parseRecordToLogRecord(dc *msgp.Reader, lr pdata.LogRecord) error { } type MessageEventLogRecord struct { - pdata.LogRecordSlice + plog.LogRecordSlice OptionsMap } -func (melr *MessageEventLogRecord) LogRecords() pdata.LogRecordSlice { +func (melr *MessageEventLogRecord) LogRecords() plog.LogRecordSlice { return melr.LogRecordSlice } func (melr *MessageEventLogRecord) DecodeMsg(dc *msgp.Reader) error { - melr.LogRecordSlice = pdata.NewLogRecordSlice() + melr.LogRecordSlice = plog.NewLogRecordSlice() log := melr.LogRecordSlice.AppendEmpty() var arrLen uint32 @@ -273,16 +274,16 @@ func parseOptions(dc *msgp.Reader) (OptionsMap, error) { } type ForwardEventLogRecords struct { - pdata.LogRecordSlice + plog.LogRecordSlice OptionsMap } -func (fe *ForwardEventLogRecords) LogRecords() pdata.LogRecordSlice { +func (fe *ForwardEventLogRecords) LogRecords() plog.LogRecordSlice { return fe.LogRecordSlice } func (fe *ForwardEventLogRecords) DecodeMsg(dc *msgp.Reader) (err error) { - fe.LogRecordSlice = pdata.NewLogRecordSlice() + fe.LogRecordSlice = plog.NewLogRecordSlice() var arrLen uint32 arrLen, err = dc.ReadArrayHeader() @@ -327,7 +328,7 @@ func (fe *ForwardEventLogRecords) DecodeMsg(dc *msgp.Reader) (err error) { return } -func parseEntryToLogRecord(dc *msgp.Reader, lr pdata.LogRecord) error { +func parseEntryToLogRecord(dc *msgp.Reader, lr plog.LogRecord) error { arrLen, err := dc.ReadArrayHeader() if err != nil { return msgp.WrapError(err) @@ -345,18 +346,18 @@ func parseEntryToLogRecord(dc *msgp.Reader, lr pdata.LogRecord) error { } type PackedForwardEventLogRecords struct { - pdata.LogRecordSlice + plog.LogRecordSlice OptionsMap } -func (pfe *PackedForwardEventLogRecords) LogRecords() pdata.LogRecordSlice { +func (pfe *PackedForwardEventLogRecords) LogRecords() plog.LogRecordSlice { return pfe.LogRecordSlice } // DecodeMsg implements msgp.Decodable. This was originally code generated but // then manually copied here in order to handle the optional Options field. 
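[Editor's note, not part of the patch] The event types above now embed `plog.LogRecordSlice` and stamp records via `pcommon.NewTimestampFromTime`. A small sketch of that pattern, assuming the pinned pdata commit; the attribute value is made up for illustration.

```go
package main

import (
	"fmt"
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/plog"
)

func main() {
	// Stand-alone slice of log records, as embedded by the event types above.
	records := plog.NewLogRecordSlice()

	lr := records.AppendEmpty()
	lr.SetTimestamp(pcommon.NewTimestampFromTime(time.Now()))
	lr.Body().SetStringVal("example fluent event body")
	lr.Attributes().InsertString("fluent.tag", "example.tag")

	fmt.Println(records.Len(), lr.Timestamp().AsTime().UTC())
}
```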
func (pfe *PackedForwardEventLogRecords) DecodeMsg(dc *msgp.Reader) error { - pfe.LogRecordSlice = pdata.NewLogRecordSlice() + pfe.LogRecordSlice = plog.NewLogRecordSlice() arrLen, err := dc.ReadArrayHeader() if err != nil { @@ -430,7 +431,7 @@ func (pfe *PackedForwardEventLogRecords) parseEntries(entriesRaw []byte, isGzipp msgpReader := msgp.NewReader(reader) for { - lr := pdata.NewLogRecord() + lr := plog.NewLogRecord() err := parseEntryToLogRecord(msgpReader, lr) if err != nil { if msgp.Cause(err) == io.EOF { diff --git a/receiver/fluentforwardreceiver/conversion_test.go b/receiver/fluentforwardreceiver/conversion_test.go index 96bda2372569..1a472b61ab28 100644 --- a/receiver/fluentforwardreceiver/conversion_test.go +++ b/receiver/fluentforwardreceiver/conversion_test.go @@ -22,7 +22,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/tinylib/msgp/msgp" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" ) func TestMessageEventConversion(t *testing.T) { @@ -39,7 +39,7 @@ func TestMessageEventConversion(t *testing.T) { expected := Logs( Log{ Timestamp: 1593031012000000000, - Body: pdata.NewValueString("..."), + Body: pcommon.NewValueString("..."), Attributes: map[string]interface{}{ "container_id": "b00a67eb645849d6ab38ff8beb4aad035cc7e917bf123c3e9057c7e89fc73d2d", "container_name": "/unruffled_cannon", @@ -107,7 +107,7 @@ func TestAttributeTypeConversion(t *testing.T) { require.EqualValues(t, Logs( Log{ Timestamp: 5000000000000, - Body: pdata.NewValueEmpty(), + Body: pcommon.NewValueEmpty(), Attributes: map[string]interface{}{ "a": 5.0, "b": 6.0, @@ -252,21 +252,21 @@ func TestBodyConversion(t *testing.T) { le := event.LogRecords().At(0) le.Attributes().Sort() - body := pdata.NewValueMap() + body := pcommon.NewValueMap() body.MapVal().InsertString("a", "value") - bv := pdata.NewValueSlice() + bv := pcommon.NewValueSlice() bv.SliceVal().EnsureCapacity(2) bv.SliceVal().AppendEmpty().SetStringVal("first") bv.SliceVal().AppendEmpty().SetStringVal("second") body.MapVal().Insert("b", bv) - cv := pdata.NewValueMap() + cv := pcommon.NewValueMap() cv.MapVal().InsertInt("d", 24) body.MapVal().Insert("c", cv) // Sort the map, sometimes may get in a different order. 
- require.Equal(t, pdata.ValueTypeMap, le.Body().Type()) + require.Equal(t, pcommon.ValueTypeMap, le.Body().Type()) le.Body().MapVal().Sort() assert.EqualValues(t, Logs( Log{ diff --git a/receiver/fluentforwardreceiver/go.mod b/receiver/fluentforwardreceiver/go.mod index 53e4a3e50d5e..a9fe644e6da5 100644 --- a/receiver/fluentforwardreceiver/go.mod +++ b/receiver/fluentforwardreceiver/go.mod @@ -6,8 +6,8 @@ require ( github.com/stretchr/testify v1.7.1 github.com/tinylib/msgp v1.1.6 go.opencensus.io v0.23.0 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/zap v1.21.0 ) @@ -16,7 +16,7 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/fsnotify/fsnotify v1.5.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/pretty v0.3.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect @@ -25,7 +25,6 @@ require ( github.com/philhofer/fwd v1.1.1 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect @@ -35,3 +34,5 @@ require ( gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/receiver/fluentforwardreceiver/go.sum b/receiver/fluentforwardreceiver/go.sum index dab71300146f..5724be27cd81 100644 --- a/receiver/fluentforwardreceiver/go.sum +++ b/receiver/fluentforwardreceiver/go.sum @@ -15,7 +15,7 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= @@ -70,7 +70,6 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/hashicorp/errwrap v1.0.0/go.mod 
h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -100,8 +99,8 @@ github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqx github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= @@ -150,8 +149,6 @@ github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTE github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -168,10 +165,10 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp 
v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= @@ -211,7 +208,7 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -234,7 +231,7 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= diff --git a/receiver/fluentforwardreceiver/receiver_test.go b/receiver/fluentforwardreceiver/receiver_test.go index eee74c6f0d27..ee1d25a5379d 100644 --- a/receiver/fluentforwardreceiver/receiver_test.go +++ b/receiver/fluentforwardreceiver/receiver_test.go @@ -29,7 +29,8 @@ import ( "github.com/stretchr/testify/require" "github.com/tinylib/msgp/msgp" "go.opentelemetry.io/collector/consumer/consumertest" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" "go.uber.org/zap" "go.uber.org/zap/zaptest/observer" ) @@ -108,7 +109,7 @@ func TestMessageEvent(t *testing.T) { require.Equal(t, len(eventBytes), n) require.NoError(t, conn.Close()) - var converted []pdata.Logs + var converted []plog.Logs require.Eventually(t, func() bool { converted = next.AllLogs() return len(converted) == 1 @@ -117,7 +118,7 @@ func TestMessageEvent(t *testing.T) { converted[0].ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Attributes().Sort() require.EqualValues(t, Logs(Log{ Timestamp: 1593031012000000000, - Body: pdata.NewValueString("..."), + Body: pcommon.NewValueString("..."), Attributes: map[string]interface{}{ "container_id": "b00a67eb645849d6ab38ff8beb4aad035cc7e917bf123c3e9057c7e89fc73d2d", "container_name": "/unruffled_cannon", @@ -140,7 +141,7 @@ func TestForwardEvent(t *testing.T) { require.Equal(t, len(eventBytes), n) require.NoError(t, conn.Close()) - var converted []pdata.Logs + var converted []plog.Logs 
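[Editor's note, not part of the patch] The receiver tests around this point now collect `plog.Logs` from a `consumertest.LogsSink`. A hedged sketch of that test pattern, assuming the sink's `ConsumeLogs`/`AllLogs` methods carry over unchanged apart from the pdata types; the variable names and log body are illustrative.

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/collector/consumer/consumertest"
	"go.opentelemetry.io/collector/pdata/plog"
)

func main() {
	sink := new(consumertest.LogsSink)

	// Build one plog.Logs payload, as the receiver would after conversion.
	ld := plog.NewLogs()
	lr := ld.ResourceLogs().AppendEmpty().ScopeLogs().AppendEmpty().LogRecords().AppendEmpty()
	lr.Body().SetStringVal("...")

	if err := sink.ConsumeLogs(context.Background(), ld); err != nil {
		panic(err)
	}

	converted := sink.AllLogs() // []plog.Logs, as asserted in the tests above
	fmt.Println(len(converted))
}
```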
require.Eventually(t, func() bool { converted = next.AllLogs() return len(converted) == 1 @@ -152,7 +153,7 @@ func TestForwardEvent(t *testing.T) { require.EqualValues(t, Logs( Log{ Timestamp: 1593032377776693638, - Body: pdata.NewValueEmpty(), + Body: pcommon.NewValueEmpty(), Attributes: map[string]interface{}{ "Mem.free": 848908, "Mem.total": 7155496, @@ -165,7 +166,7 @@ func TestForwardEvent(t *testing.T) { }, Log{ Timestamp: 1593032378756829346, - Body: pdata.NewValueEmpty(), + Body: pcommon.NewValueEmpty(), Attributes: map[string]interface{}{ "Mem.free": 848908, "Mem.total": 7155496, @@ -222,7 +223,7 @@ func TestForwardPackedEvent(t *testing.T) { require.Equal(t, len(eventBytes), n) require.NoError(t, conn.Close()) - var converted []pdata.Logs + var converted []plog.Logs require.Eventually(t, func() bool { converted = next.AllLogs() return len(converted) == 1 @@ -235,7 +236,7 @@ func TestForwardPackedEvent(t *testing.T) { require.EqualValues(t, Logs( Log{ Timestamp: 1593032517024597622, - Body: pdata.NewValueString("starting fluentd worker pid=17 ppid=7 worker=0"), + Body: pcommon.NewValueString("starting fluentd worker pid=17 ppid=7 worker=0"), Attributes: map[string]interface{}{ "fluent.tag": "fluent.info", "pid": 17, @@ -245,21 +246,21 @@ func TestForwardPackedEvent(t *testing.T) { }, Log{ Timestamp: 1593032517028573686, - Body: pdata.NewValueString("delayed_commit_timeout is overwritten by ack_response_timeout"), + Body: pcommon.NewValueString("delayed_commit_timeout is overwritten by ack_response_timeout"), Attributes: map[string]interface{}{ "fluent.tag": "fluent.info", }, }, Log{ Timestamp: 1593032517028815948, - Body: pdata.NewValueString("following tail of /var/log/kern.log"), + Body: pcommon.NewValueString("following tail of /var/log/kern.log"), Attributes: map[string]interface{}{ "fluent.tag": "fluent.info", }, }, Log{ Timestamp: 1593032517031174229, - Body: pdata.NewValueString("fluentd worker is now running worker=0"), + Body: pcommon.NewValueString("fluentd worker is now running worker=0"), Attributes: map[string]interface{}{ "fluent.tag": "fluent.info", "worker": 0, @@ -267,7 +268,7 @@ func TestForwardPackedEvent(t *testing.T) { }, Log{ Timestamp: 1593032522187382822, - Body: pdata.NewValueString("fluentd worker is now stopping worker=0"), + Body: pcommon.NewValueString("fluentd worker is now stopping worker=0"), Attributes: map[string]interface{}{ "fluent.tag": "fluent.info", "worker": 0, @@ -288,7 +289,7 @@ func TestForwardPackedCompressedEvent(t *testing.T) { require.Equal(t, len(eventBytes), n) require.NoError(t, conn.Close()) - var converted []pdata.Logs + var converted []plog.Logs require.Eventually(t, func() bool { converted = next.AllLogs() return len(converted) == 1 @@ -301,7 +302,7 @@ func TestForwardPackedCompressedEvent(t *testing.T) { require.EqualValues(t, Logs( Log{ Timestamp: 1593032426012197420, - Body: pdata.NewValueString("starting fluentd worker pid=17 ppid=7 worker=0"), + Body: pcommon.NewValueString("starting fluentd worker pid=17 ppid=7 worker=0"), Attributes: map[string]interface{}{ "fluent.tag": "fluent.info", "pid": 17, @@ -311,21 +312,21 @@ func TestForwardPackedCompressedEvent(t *testing.T) { }, Log{ Timestamp: 1593032426013724933, - Body: pdata.NewValueString("delayed_commit_timeout is overwritten by ack_response_timeout"), + Body: pcommon.NewValueString("delayed_commit_timeout is overwritten by ack_response_timeout"), Attributes: map[string]interface{}{ "fluent.tag": "fluent.info", }, }, Log{ Timestamp: 1593032426020510455, - Body: 
pdata.NewValueString("following tail of /var/log/kern.log"), + Body: pcommon.NewValueString("following tail of /var/log/kern.log"), Attributes: map[string]interface{}{ "fluent.tag": "fluent.info", }, }, Log{ Timestamp: 1593032426024346580, - Body: pdata.NewValueString("fluentd worker is now running worker=0"), + Body: pcommon.NewValueString("fluentd worker is now running worker=0"), Attributes: map[string]interface{}{ "fluent.tag": "fluent.info", "worker": 0, @@ -333,7 +334,7 @@ func TestForwardPackedCompressedEvent(t *testing.T) { }, Log{ Timestamp: 1593032434346935532, - Body: pdata.NewValueString("fluentd worker is now stopping worker=0"), + Body: pcommon.NewValueString("fluentd worker is now stopping worker=0"), Attributes: map[string]interface{}{ "fluent.tag": "fluent.info", "worker": 0, @@ -368,7 +369,7 @@ func TestUnixEndpoint(t *testing.T) { require.NoError(t, err) require.Greater(t, n, 0) - var converted []pdata.Logs + var converted []plog.Logs require.Eventually(t, func() bool { converted = next.AllLogs() return len(converted) == 1 @@ -412,7 +413,7 @@ func TestHighVolume(t *testing.T) { wg.Wait() - var converted []pdata.Logs + var converted []plog.Logs require.Eventually(t, func() bool { converted = next.AllLogs() diff --git a/receiver/fluentforwardreceiver/util_test.go b/receiver/fluentforwardreceiver/util_test.go index b62e7df3239a..81c70f0d7149 100644 --- a/receiver/fluentforwardreceiver/util_test.go +++ b/receiver/fluentforwardreceiver/util_test.go @@ -15,14 +15,15 @@ package fluentforwardreceiver import ( - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" ) // Log is a convenience struct for constructing logs for tests. // See Logs for rationale. type Log struct { Timestamp int64 - Body pdata.Value + Body pcommon.Value Attributes map[string]interface{} } @@ -30,15 +31,15 @@ type Log struct { // relatively easy to read and write declaratively compared to the highly // imperative and verbose method of using pdata directly. // Attributes are sorted by key name. 
-func Logs(recs ...Log) pdata.Logs { - out := pdata.NewLogs() +func Logs(recs ...Log) plog.Logs { + out := plog.NewLogs() logSlice := out.ResourceLogs().AppendEmpty().ScopeLogs().AppendEmpty().LogRecords() logSlice.EnsureCapacity(len(recs)) for i := range recs { l := logSlice.AppendEmpty() recs[i].Body.CopyTo(l.Body()) - l.SetTimestamp(pdata.Timestamp(recs[i].Timestamp)) - pdata.NewMapFromRaw(recs[i].Attributes).CopyTo(l.Attributes()) + l.SetTimestamp(pcommon.Timestamp(recs[i].Timestamp)) + pcommon.NewMapFromRaw(recs[i].Attributes).CopyTo(l.Attributes()) l.Attributes().Sort() } diff --git a/receiver/googlecloudpubsubreceiver/go.mod b/receiver/googlecloudpubsubreceiver/go.mod index 89342bee3688..acdaba522c6c 100644 --- a/receiver/googlecloudpubsubreceiver/go.mod +++ b/receiver/googlecloudpubsubreceiver/go.mod @@ -5,8 +5,8 @@ go 1.17 require ( cloud.google.com/go/pubsub v1.19.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/atomic v1.9.0 go.uber.org/zap v1.21.0 google.golang.org/api v0.74.0 @@ -19,7 +19,7 @@ require ( cloud.google.com/go/compute v1.5.0 // indirect cloud.google.com/go/iam v0.3.0 // indirect github.com/benbjohnson/clock v1.3.0 // indirect - github.com/cenkalti/backoff/v4 v4.1.2 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/fsnotify/fsnotify v1.5.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect @@ -27,7 +27,7 @@ require ( github.com/golang/protobuf v1.5.2 // indirect github.com/google/go-cmp v0.5.7 // indirect github.com/googleapis/gax-go/v2 v2.2.0 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/pretty v0.3.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect @@ -35,7 +35,6 @@ require ( github.com/pelletier/go-toml v1.9.4 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect @@ -50,3 +49,5 @@ require ( gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/receiver/googlecloudpubsubreceiver/go.sum b/receiver/googlecloudpubsubreceiver/go.sum index d1bd40c443dd..c1b05a4633a3 100644 --- a/receiver/googlecloudpubsubreceiver/go.sum +++ b/receiver/googlecloudpubsubreceiver/go.sum @@ -77,8 +77,8 @@ github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZx github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= 
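[Editor's note, not part of the patch] Each module that needs the not-yet-tagged pdata package follows the same go.mod pattern seen in these hunks: a zero placeholder requirement plus a replace directive that pins the 2022-04-12 pseudo-version. Condensed, the pattern is:

```
require (
	go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d
	go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000
)

replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d
```

The placeholder version is never resolved directly; the replace line is what actually selects the pinned commit.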
-github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -204,7 +204,6 @@ github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pf github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= github.com/googleapis/gax-go/v2 v2.2.0 h1:s7jOdKSaksJVOxE0Y/S32otcfiP+UQ0cL8/GTKaONwE= github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -239,8 +238,8 @@ github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/X github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= @@ -289,8 +288,6 @@ github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -315,17 +312,17 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 
h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= diff --git a/receiver/googlecloudpubsubreceiver/receiver.go b/receiver/googlecloudpubsubreceiver/receiver.go index ec0e28fdfbfa..b2be439a1057 100644 --- a/receiver/googlecloudpubsubreceiver/receiver.go +++ b/receiver/googlecloudpubsubreceiver/receiver.go @@ -27,9 +27,11 @@ import ( pubsub "cloud.google.com/go/pubsub/apiv1" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/model/otlp" - "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/obsreport" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap" "google.golang.org/api/option" pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1" @@ -49,9 +51,9 @@ type pubsubReceiver struct { userAgent string config *Config client *pubsub.SubscriberClient - tracesUnmarshaler pdata.TracesUnmarshaler - metricsUnmarshaler pdata.MetricsUnmarshaler - logsUnmarshaler pdata.LogsUnmarshaler + tracesUnmarshaler ptrace.Unmarshaler + metricsUnmarshaler pmetric.Unmarshaler + logsUnmarshaler plog.Unmarshaler handler *internal.StreamHandler startOnce sync.Once } @@ -113,9 +115,9 @@ func (receiver *pubsubReceiver) Start(ctx context.Context, _ component.Host) err return } }) - receiver.tracesUnmarshaler = otlp.NewProtobufTracesUnmarshaler() - receiver.metricsUnmarshaler = otlp.NewProtobufMetricsUnmarshaler() - receiver.logsUnmarshaler 
= otlp.NewProtobufLogsUnmarshaler() + receiver.tracesUnmarshaler = ptrace.NewProtoUnmarshaler() + receiver.metricsUnmarshaler = pmetric.NewProtoUnmarshaler() + receiver.logsUnmarshaler = plog.NewProtoUnmarshaler() return startErr } @@ -133,7 +135,7 @@ func (receiver *pubsubReceiver) handleLogStrings(ctx context.Context, message *p data := string(message.Message.Data) timestamp := message.GetMessage().PublishTime - out := pdata.NewLogs() + out := plog.NewLogs() logs := out.ResourceLogs() rls := logs.AppendEmpty() @@ -141,7 +143,7 @@ func (receiver *pubsubReceiver) handleLogStrings(ctx context.Context, message *p lr := ills.LogRecords().AppendEmpty() lr.Body().SetStringVal(data) - lr.SetTimestamp(pdata.NewTimestampFromTime(timestamp.AsTime())) + lr.SetTimestamp(pcommon.NewTimestampFromTime(timestamp.AsTime())) return receiver.logsConsumer.ConsumeLogs(ctx, out) } diff --git a/receiver/googlecloudpubsubreceiver/testdata/data.go b/receiver/googlecloudpubsubreceiver/testdata/data.go index 50ab1f960f8d..1b23f55e06d8 100644 --- a/receiver/googlecloudpubsubreceiver/testdata/data.go +++ b/receiver/googlecloudpubsubreceiver/testdata/data.go @@ -18,43 +18,44 @@ import ( "bytes" "compress/gzip" - "go.opentelemetry.io/collector/model/otlp" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" ) func CreateTraceExport() []byte { - out := pdata.NewTraces() + out := ptrace.NewTraces() resources := out.ResourceSpans() resource := resources.AppendEmpty() libs := resource.ScopeSpans() spans := libs.AppendEmpty().Spans() span := spans.AppendEmpty() span.SetName("test") - data, _ := otlp.NewProtobufTracesMarshaler().MarshalTraces(out) + data, _ := ptrace.NewProtoMarshaler().MarshalTraces(out) return data } func CreateMetricExport() []byte { - out := pdata.NewMetrics() + out := pmetric.NewMetrics() resources := out.ResourceMetrics() resource := resources.AppendEmpty() libs := resource.ScopeMetrics() metrics := libs.AppendEmpty().Metrics() metric := metrics.AppendEmpty() metric.SetName("test") - data, _ := otlp.NewProtobufMetricsMarshaler().MarshalMetrics(out) + data, _ := pmetric.NewProtoMarshaler().MarshalMetrics(out) return data } func CreateLogExport() []byte { - out := pdata.NewLogs() + out := plog.NewLogs() resources := out.ResourceLogs() resource := resources.AppendEmpty() libs := resource.ScopeLogs() logs := libs.AppendEmpty() log := logs.LogRecords().AppendEmpty() log.SetName("test") - data, _ := otlp.NewProtobufLogsMarshaler().MarshalLogs(out) + data, _ := plog.NewProtoMarshaler().MarshalLogs(out) return data } diff --git a/receiver/googlecloudspannerreceiver/go.mod b/receiver/googlecloudspannerreceiver/go.mod index d1d75d972eac..d79f41fd06de 100644 --- a/receiver/googlecloudspannerreceiver/go.mod +++ b/receiver/googlecloudspannerreceiver/go.mod @@ -5,8 +5,7 @@ go 1.17 require ( cloud.google.com/go/spanner v1.31.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d go.uber.org/zap v1.21.0 google.golang.org/api v0.74.0 gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b @@ -21,6 +20,7 @@ require ( require ( github.com/ReneKroon/ttlcache/v2 v2.11.0 github.com/mitchellh/hashstructure v1.1.0 + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 ) require ( @@ -40,7 +40,7 @@ require ( 
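[Editor's note, not part of the patch] The pubsub receiver and its testdata above swap the old `otlp.NewProtobuf*Marshaler`/`Unmarshaler` constructors for the per-signal ones in `ptrace`, `pmetric`, and `plog`. A minimal round-trip sketch, assuming the `UnmarshalTraces` method name on the new unmarshaler interface (the unmarshal call itself is not shown in this diff):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/ptrace"
)

func main() {
	// Build a tiny trace payload, as CreateTraceExport does in testdata/data.go.
	td := ptrace.NewTraces()
	span := td.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty().Spans().AppendEmpty()
	span.SetName("test")

	// Marshal with the new per-signal protobuf marshaler...
	buf, err := ptrace.NewProtoMarshaler().MarshalTraces(td)
	if err != nil {
		panic(err)
	}

	// ...and decode it again with the matching unmarshaler, as the
	// receiver now does for incoming Pub/Sub messages.
	back, err := ptrace.NewProtoUnmarshaler().UnmarshalTraces(buf)
	if err != nil {
		panic(err)
	}
	fmt.Println(back.SpanCount())
}
```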
github.com/golang/protobuf v1.5.2 // indirect github.com/google/go-cmp v0.5.7 // indirect github.com/googleapis/gax-go/v2 v2.2.0 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/pretty v0.3.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect @@ -48,7 +48,6 @@ require ( github.com/pelletier/go-toml v1.9.4 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spf13/cast v1.4.1 // indirect github.com/stretchr/objx v0.1.1 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect @@ -67,3 +66,5 @@ require ( gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/yaml.v2 v2.4.0 // indirect ) + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/receiver/googlecloudspannerreceiver/go.sum b/receiver/googlecloudspannerreceiver/go.sum index df0db4570df7..bdb6e448d78c 100644 --- a/receiver/googlecloudspannerreceiver/go.sum +++ b/receiver/googlecloudspannerreceiver/go.sum @@ -74,7 +74,7 @@ github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZx github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.3.0 h1:t/LhUZLVitR1Ow2YOnduCsavhwFUklBMoGVYUCqmCqk= github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -213,7 +213,6 @@ github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pf github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= github.com/googleapis/gax-go/v2 v2.2.0 h1:s7jOdKSaksJVOxE0Y/S32otcfiP+UQ0cL8/GTKaONwE= github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -249,8 +248,8 @@ github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/X github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= 
-github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= @@ -307,8 +306,6 @@ github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIH github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -335,17 +332,17 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= go.opentelemetry.io/otel/trace v1.6.3/go.mod 
h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= diff --git a/receiver/googlecloudspannerreceiver/internal/filterfactory/testhelpers_test.go b/receiver/googlecloudspannerreceiver/internal/filterfactory/testhelpers_test.go index 580a7622b145..67e15aaaf539 100644 --- a/receiver/googlecloudspannerreceiver/internal/filterfactory/testhelpers_test.go +++ b/receiver/googlecloudspannerreceiver/internal/filterfactory/testhelpers_test.go @@ -16,7 +16,7 @@ package filterfactory import ( "github.com/stretchr/testify/mock" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/filter" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/metadata" @@ -50,7 +50,7 @@ func (f *mockFilter) LimitByTimestamp() int { } func generateMetadataItems(prefixes []string, prefixHighCardinality []bool) []*metadata.MetricsMetadata { - metricDataType := metadata.NewMetricDataType(pdata.MetricDataTypeGauge, pdata.MetricAggregationTemporalityUnspecified, false) + metricDataType := metadata.NewMetricDataType(pmetric.MetricDataTypeGauge, pmetric.MetricAggregationTemporalityUnspecified, false) metadataItems := make([]*metadata.MetricsMetadata, len(prefixes)) int64MetricValueMetadata, _ := metadata.NewMetricValueMetadata("int64", "int64Column", metricDataType, "int64Unit", metadata.IntValueType) float64MetricValueMetadata, _ := metadata.NewMetricValueMetadata("float64", "float64Column", metricDataType, "float64Unit", metadata.FloatValueType) diff --git a/receiver/googlecloudspannerreceiver/internal/metadata/labelvalue.go b/receiver/googlecloudspannerreceiver/internal/metadata/labelvalue.go index d32468016970..79ccbbbbc59a 100644 --- a/receiver/googlecloudspannerreceiver/internal/metadata/labelvalue.go +++ b/receiver/googlecloudspannerreceiver/internal/metadata/labelvalue.go @@ -19,7 +19,7 @@ import ( "sort" "strings" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" ) type newLabelValueFunction func(m LabelValueMetadata, value interface{}) LabelValue @@ -33,7 +33,7 @@ type LabelValueMetadata interface { type LabelValue interface { Metadata() LabelValueMetadata Value() interface{} - SetValueTo(attributes pdata.Map) + SetValueTo(attributes pcommon.Map) } type queryLabelValueMetadata struct { @@ -102,7 +102,7 @@ func (v stringLabelValue) Value() interface{} { return v.value } -func (v stringLabelValue) SetValueTo(attributes pdata.Map) { +func (v stringLabelValue) SetValueTo(attributes pcommon.Map) { attributes.InsertString(v.metadata.Name(), v.value) } @@ -121,7 +121,7 @@ func (v int64LabelValue) Value() interface{} { return v.value } -func (v int64LabelValue) SetValueTo(attributes pdata.Map) { +func (v int64LabelValue) SetValueTo(attributes pcommon.Map) { attributes.InsertInt(v.metadata.Name(), v.value) } @@ -140,7 +140,7 @@ func (v boolLabelValue) Value() interface{} { return v.value } -func (v boolLabelValue) SetValueTo(attributes pdata.Map) { +func (v boolLabelValue) SetValueTo(attributes pcommon.Map) { attributes.InsertBool(v.metadata.Name(), v.value) } @@ -159,7 +159,7 @@ func (v stringSliceLabelValue) Value() interface{} { return v.value } -func (v stringSliceLabelValue) SetValueTo(attributes pdata.Map) { +func (v stringSliceLabelValue) SetValueTo(attributes pcommon.Map) { 
attributes.InsertString(v.metadata.Name(), v.value) } @@ -184,7 +184,7 @@ func (v byteSliceLabelValue) Value() interface{} { return v.value } -func (v byteSliceLabelValue) SetValueTo(attributes pdata.Map) { +func (v byteSliceLabelValue) SetValueTo(attributes pcommon.Map) { attributes.InsertString(v.metadata.Name(), v.value) } @@ -203,7 +203,7 @@ func (v lockRequestSliceLabelValue) Value() interface{} { return v.value } -func (v lockRequestSliceLabelValue) SetValueTo(attributes pdata.Map) { +func (v lockRequestSliceLabelValue) SetValueTo(attributes pcommon.Map) { attributes.InsertString(v.metadata.Name(), v.value) } diff --git a/receiver/googlecloudspannerreceiver/internal/metadata/labelvalue_test.go b/receiver/googlecloudspannerreceiver/internal/metadata/labelvalue_test.go index 38594ef3748b..65fec21d5bf9 100644 --- a/receiver/googlecloudspannerreceiver/internal/metadata/labelvalue_test.go +++ b/receiver/googlecloudspannerreceiver/internal/metadata/labelvalue_test.go @@ -19,7 +19,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" ) func TestStringLabelValueMetadata(t *testing.T) { @@ -111,7 +111,7 @@ func TestStringLabelValue(t *testing.T) { assert.Equal(t, StringValueType, labelValue.Metadata().ValueType()) assert.Equal(t, stringValue, labelValue.Value()) - attributes := pdata.NewMap() + attributes := pcommon.NewMap() labelValue.SetValueTo(attributes) @@ -131,7 +131,7 @@ func TestInt64LabelValue(t *testing.T) { assert.Equal(t, IntValueType, labelValue.Metadata().ValueType()) assert.Equal(t, int64Value, labelValue.Value()) - attributes := pdata.NewMap() + attributes := pcommon.NewMap() labelValue.SetValueTo(attributes) @@ -151,7 +151,7 @@ func TestBoolLabelValue(t *testing.T) { assert.Equal(t, BoolValueType, labelValue.Metadata().ValueType()) assert.Equal(t, boolValue, labelValue.Value()) - attributes := pdata.NewMap() + attributes := pcommon.NewMap() labelValue.SetValueTo(attributes) @@ -171,7 +171,7 @@ func TestStringSliceLabelValue(t *testing.T) { assert.Equal(t, StringSliceValueType, labelValue.Metadata().ValueType()) assert.Equal(t, stringValue, labelValue.Value()) - attributes := pdata.NewMap() + attributes := pcommon.NewMap() labelValue.SetValueTo(attributes) @@ -191,7 +191,7 @@ func TestByteSliceLabelValue(t *testing.T) { assert.Equal(t, ByteSliceValueType, labelValue.Metadata().ValueType()) assert.Equal(t, stringValue, labelValue.Value()) - attributes := pdata.NewMap() + attributes := pcommon.NewMap() labelValue.SetValueTo(attributes) @@ -211,7 +211,7 @@ func TestLockRequestSliceLabelValue(t *testing.T) { assert.Equal(t, LockRequestSliceValueType, labelValue.Metadata().ValueType()) assert.Equal(t, stringValue, labelValue.Value()) - attributes := pdata.NewMap() + attributes := pcommon.NewMap() labelValue.SetValueTo(attributes) diff --git a/receiver/googlecloudspannerreceiver/internal/metadata/metadata_test.go b/receiver/googlecloudspannerreceiver/internal/metadata/metadata_test.go index 8b0b1e87633c..1ed74eb845fa 100644 --- a/receiver/googlecloudspannerreceiver/internal/metadata/metadata_test.go +++ b/receiver/googlecloudspannerreceiver/internal/metadata/metadata_test.go @@ -14,7 +14,9 @@ package metadata -import "go.opentelemetry.io/collector/model/pdata" +import ( + "go.opentelemetry.io/collector/pdata/pmetric" +) const ( labelName = "LabelName" @@ -27,7 +29,7 @@ const ( metricName = "metricName" metricColumnName = "metricColumnName" 
- metricDataType = pdata.MetricDataTypeGauge + metricDataType = pmetric.MetricDataTypeGauge metricUnit = "metricUnit" metricNamePrefix = "metricNamePrefix-" diff --git a/receiver/googlecloudspannerreceiver/internal/metadata/metricdatatype.go b/receiver/googlecloudspannerreceiver/internal/metadata/metricdatatype.go index 37d27046fbc9..25350923741f 100644 --- a/receiver/googlecloudspannerreceiver/internal/metadata/metricdatatype.go +++ b/receiver/googlecloudspannerreceiver/internal/metadata/metricdatatype.go @@ -14,21 +14,21 @@ package metadata // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/metadata" -import "go.opentelemetry.io/collector/model/pdata" +import "go.opentelemetry.io/collector/pdata/pmetric" type MetricDataType interface { - MetricDataType() pdata.MetricDataType - AggregationTemporality() pdata.MetricAggregationTemporality + MetricDataType() pmetric.MetricDataType + AggregationTemporality() pmetric.MetricAggregationTemporality IsMonotonic() bool } type metricValueDataType struct { - dataType pdata.MetricDataType - aggregationTemporality pdata.MetricAggregationTemporality + dataType pmetric.MetricDataType + aggregationTemporality pmetric.MetricAggregationTemporality isMonotonic bool } -func NewMetricDataType(dataType pdata.MetricDataType, aggregationTemporality pdata.MetricAggregationTemporality, +func NewMetricDataType(dataType pmetric.MetricDataType, aggregationTemporality pmetric.MetricAggregationTemporality, isMonotonic bool) MetricDataType { return metricValueDataType{ dataType: dataType, @@ -37,11 +37,11 @@ func NewMetricDataType(dataType pdata.MetricDataType, aggregationTemporality pda } } -func (metricValueDataType metricValueDataType) MetricDataType() pdata.MetricDataType { +func (metricValueDataType metricValueDataType) MetricDataType() pmetric.MetricDataType { return metricValueDataType.dataType } -func (metricValueDataType metricValueDataType) AggregationTemporality() pdata.MetricAggregationTemporality { +func (metricValueDataType metricValueDataType) AggregationTemporality() pmetric.MetricAggregationTemporality { return metricValueDataType.aggregationTemporality } diff --git a/receiver/googlecloudspannerreceiver/internal/metadata/metricdatatype_test.go b/receiver/googlecloudspannerreceiver/internal/metadata/metricdatatype_test.go index 4a1338cd0924..9d040616e6f7 100644 --- a/receiver/googlecloudspannerreceiver/internal/metadata/metricdatatype_test.go +++ b/receiver/googlecloudspannerreceiver/internal/metadata/metricdatatype_test.go @@ -19,28 +19,28 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" ) func TestNewMetricDataType(t *testing.T) { - metricDataType := NewMetricDataType(pdata.MetricDataTypeGauge, pdata.MetricAggregationTemporalityDelta, true) + metricDataType := NewMetricDataType(pmetric.MetricDataTypeGauge, pmetric.MetricAggregationTemporalityDelta, true) require.NotNil(t, metricDataType) - assert.Equal(t, metricDataType.MetricDataType(), pdata.MetricDataTypeGauge) - assert.Equal(t, metricDataType.AggregationTemporality(), pdata.MetricAggregationTemporalityDelta) + assert.Equal(t, metricDataType.MetricDataType(), pmetric.MetricDataTypeGauge) + assert.Equal(t, metricDataType.AggregationTemporality(), pmetric.MetricAggregationTemporalityDelta) assert.True(t, metricDataType.IsMonotonic()) } func TestMetricValueDataType_MetricDataType(t 
*testing.T) { - valueDataType := metricValueDataType{dataType: pdata.MetricDataTypeGauge} + valueDataType := metricValueDataType{dataType: pmetric.MetricDataTypeGauge} - assert.Equal(t, valueDataType.MetricDataType(), pdata.MetricDataTypeGauge) + assert.Equal(t, valueDataType.MetricDataType(), pmetric.MetricDataTypeGauge) } func TestMetricValueDataType_AggregationTemporality(t *testing.T) { - valueDataType := metricValueDataType{aggregationTemporality: pdata.MetricAggregationTemporalityDelta} + valueDataType := metricValueDataType{aggregationTemporality: pmetric.MetricAggregationTemporalityDelta} - assert.Equal(t, valueDataType.AggregationTemporality(), pdata.MetricAggregationTemporalityDelta) + assert.Equal(t, valueDataType.AggregationTemporality(), pmetric.MetricAggregationTemporalityDelta) } func TestMetricValueDataType_IsMonotonic(t *testing.T) { diff --git a/receiver/googlecloudspannerreceiver/internal/metadata/metricsbuilder.go b/receiver/googlecloudspannerreceiver/internal/metadata/metricsbuilder.go index 9c889892236e..b755b5e71df7 100644 --- a/receiver/googlecloudspannerreceiver/internal/metadata/metricsbuilder.go +++ b/receiver/googlecloudspannerreceiver/internal/metadata/metricsbuilder.go @@ -15,7 +15,7 @@ package metadata // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/metadata" import ( - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/filter" ) @@ -23,7 +23,7 @@ import ( const instrumentationLibraryName = "otelcol/googlecloudspannermetrics" type MetricsBuilder interface { - Build(dataPoints []*MetricsDataPoint) (pdata.Metrics, error) + Build(dataPoints []*MetricsDataPoint) (pmetric.Metrics, error) Shutdown() error } @@ -41,15 +41,15 @@ func (b *metricsFromDataPointBuilder) Shutdown() error { return b.filterResolver.Shutdown() } -func (b *metricsFromDataPointBuilder) Build(dataPoints []*MetricsDataPoint) (pdata.Metrics, error) { - var metrics pdata.Metrics +func (b *metricsFromDataPointBuilder) Build(dataPoints []*MetricsDataPoint) (pmetric.Metrics, error) { + var metrics pmetric.Metrics groupedDataPoints, err := b.groupAndFilter(dataPoints) if err != nil { - return pdata.Metrics{}, err + return pmetric.Metrics{}, err } - metrics = pdata.NewMetrics() + metrics = pmetric.NewMetrics() rms := metrics.ResourceMetrics() rm := rms.AppendEmpty() @@ -63,12 +63,12 @@ func (b *metricsFromDataPointBuilder) Build(dataPoints []*MetricsDataPoint) (pda metric.SetUnit(key.MetricUnit) metric.SetDataType(key.MetricDataType.MetricDataType()) - var dataPointSlice pdata.NumberDataPointSlice + var dataPointSlice pmetric.NumberDataPointSlice switch key.MetricDataType.MetricDataType() { - case pdata.MetricDataTypeGauge: + case pmetric.MetricDataTypeGauge: dataPointSlice = metric.Gauge().DataPoints() - case pdata.MetricDataTypeSum: + case pmetric.MetricDataTypeSum: metric.Sum().SetAggregationTemporality(key.MetricDataType.AggregationTemporality()) metric.Sum().SetIsMonotonic(key.MetricDataType.IsMonotonic()) dataPointSlice = metric.Sum().DataPoints() diff --git a/receiver/googlecloudspannerreceiver/internal/metadata/metricsbuilder_test.go b/receiver/googlecloudspannerreceiver/internal/metadata/metricsbuilder_test.go index d0d53d2bc00a..bf70331f9602 100644 --- a/receiver/googlecloudspannerreceiver/internal/metadata/metricsbuilder_test.go +++ 
b/receiver/googlecloudspannerreceiver/internal/metadata/metricsbuilder_test.go @@ -22,7 +22,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/filter" ) @@ -84,13 +85,13 @@ func TestNewMetricsFromDataPointBuilder(t *testing.T) { func TestMetricsFromDataPointBuilder_Build(t *testing.T) { testCases := map[string]struct { - metricsDataType pdata.MetricDataType + metricsDataType pmetric.MetricDataType expectedError error }{ - "Gauge": {pdata.MetricDataTypeGauge, nil}, - "Sum": {pdata.MetricDataTypeSum, nil}, - "Gauge with filtering error": {pdata.MetricDataTypeGauge, errors.New("filtering error")}, - "Sum with filtering error": {pdata.MetricDataTypeSum, errors.New("filtering error")}, + "Gauge": {pmetric.MetricDataTypeGauge, nil}, + "Sum": {pmetric.MetricDataTypeSum, nil}, + "Gauge with filtering error": {pmetric.MetricDataTypeGauge, errors.New("filtering error")}, + "Sum with filtering error": {pmetric.MetricDataTypeSum, errors.New("filtering error")}, } for name, testCase := range testCases { @@ -100,7 +101,7 @@ func TestMetricsFromDataPointBuilder_Build(t *testing.T) { } } -func testMetricsFromDataPointBuilderBuild(t *testing.T, metricDataType pdata.MetricDataType, expectedError error) { +func testMetricsFromDataPointBuilderBuild(t *testing.T, metricDataType pmetric.MetricDataType, expectedError error) { filterResolver := &mockItemFilterResolver{} dataForTesting := generateTestData(metricDataType) builder := &metricsFromDataPointBuilder{filterResolver: filterResolver} @@ -143,15 +144,15 @@ func testMetricsFromDataPointBuilderBuild(t *testing.T, metricDataType pdata.Met assert.Equal(t, expectedDataPoint.metricValue.Metadata().Unit(), ilMetric.Unit()) assert.Equal(t, expectedDataPoint.metricValue.Metadata().DataType().MetricDataType(), ilMetric.DataType()) - var dataPoint pdata.NumberDataPoint + var dataPoint pmetric.NumberDataPoint - if metricDataType == pdata.MetricDataTypeGauge { + if metricDataType == pmetric.MetricDataTypeGauge { assert.NotNil(t, ilMetric.Gauge()) assert.Equal(t, len(expectedDataPoints), ilMetric.Gauge().DataPoints().Len()) dataPoint = ilMetric.Gauge().DataPoints().At(dataPointIndex) } else { assert.NotNil(t, ilMetric.Sum()) - assert.Equal(t, pdata.MetricAggregationTemporalityDelta, ilMetric.Sum().AggregationTemporality()) + assert.Equal(t, pmetric.MetricAggregationTemporalityDelta, ilMetric.Sum().AggregationTemporality()) assert.True(t, ilMetric.Sum().IsMonotonic()) assert.Equal(t, len(expectedDataPoints), ilMetric.Sum().DataPoints().Len()) dataPoint = ilMetric.Sum().DataPoints().At(dataPointIndex) @@ -159,7 +160,7 @@ func testMetricsFromDataPointBuilderBuild(t *testing.T, metricDataType pdata.Met assertMetricValue(t, expectedDataPoint.metricValue, dataPoint) - assert.Equal(t, pdata.NewTimestampFromTime(expectedDataPoint.timestamp), dataPoint.Timestamp()) + assert.Equal(t, pcommon.NewTimestampFromTime(expectedDataPoint.timestamp), dataPoint.Timestamp()) // Adding +3 here because we'll always have 3 labels added for each metric: project_id, instance_id, database assert.Equal(t, 3+len(expectedDataPoint.labelValues), dataPoint.Attributes().Len()) @@ -292,7 +293,7 @@ func TestMetricsFromDataPointBuilder_Shutdown(t *testing.T) { } 
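[Editor's note, not part of the patch] The metrics builder above now targets `pmetric.Metrics` and branches on `pmetric.MetricDataTypeGauge` versus `MetricDataTypeSum` before appending number data points. A condensed sketch of that flow against the pinned pdata commit; the metric name, unit, and values are illustrative.

```go
package main

import (
	"fmt"
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	metrics := pmetric.NewMetrics()
	rm := metrics.ResourceMetrics().AppendEmpty()
	sm := rm.ScopeMetrics().AppendEmpty()
	metric := sm.Metrics().AppendEmpty()

	metric.SetName("example.metric")
	metric.SetUnit("1")
	metric.SetDataType(pmetric.MetricDataTypeSum)

	// For sums the builder also fixes temporality and monotonicity.
	metric.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityDelta)
	metric.Sum().SetIsMonotonic(true)

	dp := metric.Sum().DataPoints().AppendEmpty()
	dp.SetTimestamp(pcommon.NewTimestampFromTime(time.Now()))
	dp.SetIntVal(42)
	dp.Attributes().InsertString("project_id", "example-project")

	fmt.Println(metrics.MetricCount())
}
```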
} -func generateTestData(metricDataType pdata.MetricDataType) testData { +func generateTestData(metricDataType pmetric.MetricDataType) testData { timestamp1 := time.Now().UTC() timestamp2 := timestamp1.Add(time.Minute) labelValues := allPossibleLabelValues() diff --git a/receiver/googlecloudspannerreceiver/internal/metadata/metricsdatapoint.go b/receiver/googlecloudspannerreceiver/internal/metadata/metricsdatapoint.go index 17d9fe178019..0fbcb3b9f981 100644 --- a/receiver/googlecloudspannerreceiver/internal/metadata/metricsdatapoint.go +++ b/receiver/googlecloudspannerreceiver/internal/metadata/metricsdatapoint.go @@ -19,7 +19,8 @@ import ( "time" "github.com/mitchellh/hashstructure" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/datasource" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/filter" @@ -57,8 +58,8 @@ type label struct { Value interface{} } -func (mdp *MetricsDataPoint) CopyTo(dataPoint pdata.NumberDataPoint) { - dataPoint.SetTimestamp(pdata.NewTimestampFromTime(mdp.timestamp)) +func (mdp *MetricsDataPoint) CopyTo(dataPoint pmetric.NumberDataPoint) { + dataPoint.SetTimestamp(pcommon.NewTimestampFromTime(mdp.timestamp)) mdp.metricValue.SetValueTo(dataPoint) diff --git a/receiver/googlecloudspannerreceiver/internal/metadata/metricsdatapoint_test.go b/receiver/googlecloudspannerreceiver/internal/metadata/metricsdatapoint_test.go index c9738af2241a..0ad3bedcd58e 100644 --- a/receiver/googlecloudspannerreceiver/internal/metadata/metricsdatapoint_test.go +++ b/receiver/googlecloudspannerreceiver/internal/metadata/metricsdatapoint_test.go @@ -20,7 +20,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/datasource" ) @@ -86,7 +87,7 @@ func TestMetricsDataPoint_CopyTo(t *testing.T) { databaseID := databaseID() for _, metricValue := range metricValues { - dataPoint := pdata.NewNumberDataPoint() + dataPoint := pmetric.NewNumberDataPoint() metricsDataPoint := &MetricsDataPoint{ metricName: metricName, timestamp: timestamp, @@ -99,7 +100,7 @@ func TestMetricsDataPoint_CopyTo(t *testing.T) { assertMetricValue(t, metricValue, dataPoint) - assert.Equal(t, pdata.NewTimestampFromTime(timestamp), dataPoint.Timestamp()) + assert.Equal(t, pcommon.NewTimestampFromTime(timestamp), dataPoint.Timestamp()) // Adding +3 here because we'll always have 3 labels added for each metric: project_id, instance_id, database assert.Equal(t, 3+len(labelValues), dataPoint.Attributes().Len()) @@ -152,8 +153,8 @@ func allPossibleLabelValues() []LabelValue { } } -func allPossibleMetricValues(metricDataType pdata.MetricDataType) []MetricValue { - dataType := NewMetricDataType(metricDataType, pdata.MetricAggregationTemporalityDelta, true) +func allPossibleMetricValues(metricDataType pmetric.MetricDataType) []MetricValue { + dataType := NewMetricDataType(metricDataType, pmetric.MetricAggregationTemporalityDelta, true) int64Metadata, _ := NewMetricValueMetadata("int64MetricName", "int64MetricColumnName", dataType, metricUnit, IntValueType) 
float64Metadata, _ := NewMetricValueMetadata("float64MetricName", "float64MetricColumnName", dataType, @@ -170,19 +171,19 @@ func allPossibleMetricValues(metricDataType pdata.MetricDataType) []MetricValue } } -func assertDefaultLabels(t *testing.T, attributesMap pdata.Map, databaseID *datasource.DatabaseID) { +func assertDefaultLabels(t *testing.T, attributesMap pcommon.Map, databaseID *datasource.DatabaseID) { assertStringLabelValue(t, attributesMap, projectIDLabelName, databaseID.ProjectID()) assertStringLabelValue(t, attributesMap, instanceIDLabelName, databaseID.InstanceID()) assertStringLabelValue(t, attributesMap, databaseLabelName, databaseID.DatabaseName()) } -func assertNonDefaultLabels(t *testing.T, attributesMap pdata.Map, labelValues []LabelValue) { +func assertNonDefaultLabels(t *testing.T, attributesMap pcommon.Map, labelValues []LabelValue) { for _, labelValue := range labelValues { assertLabelValue(t, attributesMap, labelValue) } } -func assertLabelValue(t *testing.T, attributesMap pdata.Map, labelValue LabelValue) { +func assertLabelValue(t *testing.T, attributesMap pcommon.Map, labelValue LabelValue) { value, exists := attributesMap.Get(labelValue.Metadata().Name()) assert.True(t, exists) @@ -198,14 +199,14 @@ func assertLabelValue(t *testing.T, attributesMap pdata.Map, labelValue LabelVal } } -func assertStringLabelValue(t *testing.T, attributesMap pdata.Map, labelName string, expectedValue interface{}) { +func assertStringLabelValue(t *testing.T, attributesMap pcommon.Map, labelName string, expectedValue interface{}) { value, exists := attributesMap.Get(labelName) assert.True(t, exists) assert.Equal(t, expectedValue, value.StringVal()) } -func assertMetricValue(t *testing.T, metricValue MetricValue, dataPoint pdata.NumberDataPoint) { +func assertMetricValue(t *testing.T, metricValue MetricValue, dataPoint pmetric.NumberDataPoint) { switch metricValue.(type) { case int64MetricValue: assert.Equal(t, metricValue.Value(), dataPoint.IntVal()) diff --git a/receiver/googlecloudspannerreceiver/internal/metadata/metricvalue.go b/receiver/googlecloudspannerreceiver/internal/metadata/metricvalue.go index 7e415140911c..a82bede6f7cd 100644 --- a/receiver/googlecloudspannerreceiver/internal/metadata/metricvalue.go +++ b/receiver/googlecloudspannerreceiver/internal/metadata/metricvalue.go @@ -17,7 +17,7 @@ package metadata // import "github.com/open-telemetry/opentelemetry-collector-co import ( "fmt" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" ) type newMetricValueFunction func(m MetricValueMetadata, value interface{}) MetricValue @@ -33,7 +33,7 @@ type MetricValueMetadata interface { type MetricValue interface { Metadata() MetricValueMetadata Value() interface{} - SetValueTo(ndp pdata.NumberDataPoint) + SetValueTo(ndp pmetric.NumberDataPoint) } type queryMetricValueMetadata struct { @@ -100,11 +100,11 @@ func (v float64MetricValue) Value() interface{} { return v.value } -func (v int64MetricValue) SetValueTo(point pdata.NumberDataPoint) { +func (v int64MetricValue) SetValueTo(point pmetric.NumberDataPoint) { point.SetIntVal(v.value) } -func (v float64MetricValue) SetValueTo(point pdata.NumberDataPoint) { +func (v float64MetricValue) SetValueTo(point pmetric.NumberDataPoint) { point.SetDoubleVal(v.value) } diff --git a/receiver/googlecloudspannerreceiver/internal/metadata/metricvalue_test.go b/receiver/googlecloudspannerreceiver/internal/metadata/metricvalue_test.go index ae016b4478f9..10d334c4d24e 100644 --- 
a/receiver/googlecloudspannerreceiver/internal/metadata/metricvalue_test.go +++ b/receiver/googlecloudspannerreceiver/internal/metadata/metricvalue_test.go @@ -19,7 +19,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" ) func TestInt64MetricValueMetadata(t *testing.T) { @@ -71,7 +71,7 @@ func TestInt64MetricValue(t *testing.T) { assert.Equal(t, int64Value, metricValue.Value()) assert.Equal(t, IntValueType, metadata.ValueType()) - dataPoint := pdata.NewNumberDataPoint() + dataPoint := pmetric.NewNumberDataPoint() metricValue.SetValueTo(dataPoint) @@ -89,7 +89,7 @@ func TestFloat64MetricValue(t *testing.T) { assert.Equal(t, float64Value, metricValue.Value()) assert.Equal(t, FloatValueType, metadata.ValueType()) - dataPoint := pdata.NewNumberDataPoint() + dataPoint := pmetric.NewNumberDataPoint() metricValue.SetValueTo(dataPoint) diff --git a/receiver/googlecloudspannerreceiver/internal/metadataparser/metadataparser_test.go b/receiver/googlecloudspannerreceiver/internal/metadataparser/metadataparser_test.go index 3d7dd3c66c11..b763be3de25f 100644 --- a/receiver/googlecloudspannerreceiver/internal/metadataparser/metadataparser_test.go +++ b/receiver/googlecloudspannerreceiver/internal/metadataparser/metadataparser_test.go @@ -20,7 +20,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/metadata" ) @@ -78,6 +78,6 @@ func assertMetricsMetadata(t *testing.T, expectedName string, metricsMetadata *m assert.Equal(t, "metric_name", metricsMetadata.QueryMetricValuesMetadata[0].Name()) assert.Equal(t, "METRIC_NAME", metricsMetadata.QueryMetricValuesMetadata[0].ColumnName()) assert.Equal(t, "metric_unit", metricsMetadata.QueryMetricValuesMetadata[0].Unit()) - assert.Equal(t, pdata.MetricDataTypeGauge, metricsMetadata.QueryMetricValuesMetadata[0].DataType().MetricDataType()) + assert.Equal(t, pmetric.MetricDataTypeGauge, metricsMetadata.QueryMetricValuesMetadata[0].DataType().MetricDataType()) assert.Equal(t, metadata.IntValueType, metricsMetadata.QueryMetricValuesMetadata[0].ValueType()) } diff --git a/receiver/googlecloudspannerreceiver/internal/metadataparser/metric_test.go b/receiver/googlecloudspannerreceiver/internal/metadataparser/metric_test.go index 3d31a3f9eb85..dfcefcf3d4d3 100644 --- a/receiver/googlecloudspannerreceiver/internal/metadataparser/metric_test.go +++ b/receiver/googlecloudspannerreceiver/internal/metadataparser/metric_test.go @@ -19,7 +19,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/metadata" ) @@ -34,17 +34,17 @@ func TestMetric_ToMetricValueMetadata(t *testing.T) { testCases := map[string]struct { valueType metadata.ValueType dataType MetricType - expectedDataType pdata.MetricDataType + expectedDataType pmetric.MetricDataType expectError bool }{ - "Value type is int and data type is gauge": {metadata.IntValueType, MetricType{DataType: GaugeMetricDataType}, pdata.MetricDataTypeGauge, false}, - "Value type is int and 
data type is sum": {metadata.IntValueType, MetricType{DataType: SumMetricDataType, Aggregation: DeltaAggregationType, Monotonic: true}, pdata.MetricDataTypeSum, false}, - "Value type is int and data type is unknown": {metadata.IntValueType, MetricType{DataType: UnknownMetricDataType}, pdata.MetricDataTypeNone, true}, - "Value type is float and data type is gauge": {metadata.FloatValueType, MetricType{DataType: GaugeMetricDataType}, pdata.MetricDataTypeGauge, false}, - "Value type is float and data type is sum": {metadata.FloatValueType, MetricType{DataType: SumMetricDataType, Aggregation: DeltaAggregationType, Monotonic: true}, pdata.MetricDataTypeSum, false}, - "Value type is float and data type is unknown": {metadata.FloatValueType, MetricType{DataType: UnknownMetricDataType}, pdata.MetricDataTypeNone, true}, - "Value type is unknown and data type is gauge": {metadata.UnknownValueType, MetricType{DataType: GaugeMetricDataType}, pdata.MetricDataTypeNone, true}, - "Value type is unknown and data type is sum": {metadata.UnknownValueType, MetricType{DataType: SumMetricDataType, Aggregation: DeltaAggregationType, Monotonic: true}, pdata.MetricDataTypeNone, true}, + "Value type is int and data type is gauge": {metadata.IntValueType, MetricType{DataType: GaugeMetricDataType}, pmetric.MetricDataTypeGauge, false}, + "Value type is int and data type is sum": {metadata.IntValueType, MetricType{DataType: SumMetricDataType, Aggregation: DeltaAggregationType, Monotonic: true}, pmetric.MetricDataTypeSum, false}, + "Value type is int and data type is unknown": {metadata.IntValueType, MetricType{DataType: UnknownMetricDataType}, pmetric.MetricDataTypeNone, true}, + "Value type is float and data type is gauge": {metadata.FloatValueType, MetricType{DataType: GaugeMetricDataType}, pmetric.MetricDataTypeGauge, false}, + "Value type is float and data type is sum": {metadata.FloatValueType, MetricType{DataType: SumMetricDataType, Aggregation: DeltaAggregationType, Monotonic: true}, pmetric.MetricDataTypeSum, false}, + "Value type is float and data type is unknown": {metadata.FloatValueType, MetricType{DataType: UnknownMetricDataType}, pmetric.MetricDataTypeNone, true}, + "Value type is unknown and data type is gauge": {metadata.UnknownValueType, MetricType{DataType: GaugeMetricDataType}, pmetric.MetricDataTypeNone, true}, + "Value type is unknown and data type is sum": {metadata.UnknownValueType, MetricType{DataType: SumMetricDataType, Aggregation: DeltaAggregationType, Monotonic: true}, pmetric.MetricDataTypeNone, true}, } for name, testCase := range testCases { diff --git a/receiver/googlecloudspannerreceiver/internal/metadataparser/metrictype.go b/receiver/googlecloudspannerreceiver/internal/metadataparser/metrictype.go index baa0f7d53bd8..117519fc0f84 100644 --- a/receiver/googlecloudspannerreceiver/internal/metadataparser/metrictype.go +++ b/receiver/googlecloudspannerreceiver/internal/metadataparser/metrictype.go @@ -17,7 +17,7 @@ package metadataparser // import "github.com/open-telemetry/opentelemetry-collec import ( "errors" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/metadata" ) @@ -44,33 +44,33 @@ type MetricType struct { Monotonic bool `yaml:"monotonic"` } -func (metricType MetricType) dataType() (pdata.MetricDataType, error) { - var dataType pdata.MetricDataType +func (metricType MetricType) dataType() (pmetric.MetricDataType, 
error) { + var dataType pmetric.MetricDataType switch metricType.DataType { case GaugeMetricDataType: - dataType = pdata.MetricDataTypeGauge + dataType = pmetric.MetricDataTypeGauge case SumMetricDataType: - dataType = pdata.MetricDataTypeSum + dataType = pmetric.MetricDataTypeSum default: - return pdata.MetricDataTypeNone, errors.New("invalid data type received") + return pmetric.MetricDataTypeNone, errors.New("invalid data type received") } return dataType, nil } -func (metricType MetricType) aggregationTemporality() (pdata.MetricAggregationTemporality, error) { - var aggregationTemporality pdata.MetricAggregationTemporality +func (metricType MetricType) aggregationTemporality() (pmetric.MetricAggregationTemporality, error) { + var aggregationTemporality pmetric.MetricAggregationTemporality switch metricType.Aggregation { case DeltaAggregationType: - aggregationTemporality = pdata.MetricAggregationTemporalityDelta + aggregationTemporality = pmetric.MetricAggregationTemporalityDelta case CumulativeAggregationType: - aggregationTemporality = pdata.MetricAggregationTemporalityCumulative + aggregationTemporality = pmetric.MetricAggregationTemporalityCumulative case "": - aggregationTemporality = pdata.MetricAggregationTemporalityUnspecified + aggregationTemporality = pmetric.MetricAggregationTemporalityUnspecified default: - return pdata.MetricAggregationTemporalityUnspecified, errors.New("invalid aggregation temporality received") + return pmetric.MetricAggregationTemporalityUnspecified, errors.New("invalid aggregation temporality received") } return aggregationTemporality, nil diff --git a/receiver/googlecloudspannerreceiver/internal/metadataparser/metrictype_test.go b/receiver/googlecloudspannerreceiver/internal/metadataparser/metrictype_test.go index 84bd2abde383..a6b9988c78cb 100644 --- a/receiver/googlecloudspannerreceiver/internal/metadataparser/metrictype_test.go +++ b/receiver/googlecloudspannerreceiver/internal/metadataparser/metrictype_test.go @@ -19,18 +19,18 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" ) func TestDataType(t *testing.T) { testCases := map[string]struct { dataType MetricDataType - expectedDataType pdata.MetricDataType + expectedDataType pmetric.MetricDataType expectError bool }{ - "Gauge": {GaugeMetricDataType, pdata.MetricDataTypeGauge, false}, - "Sum": {SumMetricDataType, pdata.MetricDataTypeSum, false}, - "Invalid": {UnknownMetricDataType, pdata.MetricDataTypeNone, true}, + "Gauge": {GaugeMetricDataType, pmetric.MetricDataTypeGauge, false}, + "Sum": {SumMetricDataType, pmetric.MetricDataTypeSum, false}, + "Invalid": {UnknownMetricDataType, pmetric.MetricDataTypeNone, true}, } for name, testCase := range testCases { @@ -55,13 +55,13 @@ func TestDataType(t *testing.T) { func TestAggregationTemporality(t *testing.T) { testCases := map[string]struct { aggregationTemporality AggregationType - expectedAggregationTemporality pdata.MetricAggregationTemporality + expectedAggregationTemporality pmetric.MetricAggregationTemporality expectError bool }{ - "Cumulative": {CumulativeAggregationType, pdata.MetricAggregationTemporalityCumulative, false}, - "Delta": {DeltaAggregationType, pdata.MetricAggregationTemporalityDelta, false}, - "Empty": {"", pdata.MetricAggregationTemporalityUnspecified, false}, - "Invalid": {UnknownAggregationType, pdata.MetricAggregationTemporalityUnspecified, true}, + "Cumulative": 
{CumulativeAggregationType, pmetric.MetricAggregationTemporalityCumulative, false}, + "Delta": {DeltaAggregationType, pmetric.MetricAggregationTemporalityDelta, false}, + "Empty": {"", pmetric.MetricAggregationTemporalityUnspecified, false}, + "Invalid": {UnknownAggregationType, pmetric.MetricAggregationTemporalityUnspecified, true}, } for name, testCase := range testCases { @@ -87,14 +87,14 @@ func TestToMetricDataType(t *testing.T) { testCases := map[string]struct { dataType MetricDataType aggregationTemporality AggregationType - expectedDataType pdata.MetricDataType - expectedAggregationTemporality pdata.MetricAggregationTemporality + expectedDataType pmetric.MetricDataType + expectedAggregationTemporality pmetric.MetricAggregationTemporality isMonotonic bool expectError bool }{ - "Happy path": {GaugeMetricDataType, CumulativeAggregationType, pdata.MetricDataTypeGauge, pdata.MetricAggregationTemporalityCumulative, true, false}, - "Invalid data type": {"invalid", CumulativeAggregationType, pdata.MetricDataTypeNone, pdata.MetricAggregationTemporalityCumulative, true, true}, - "Invalid aggregation": {GaugeMetricDataType, "invalid", pdata.MetricDataTypeGauge, pdata.MetricAggregationTemporalityUnspecified, true, true}, + "Happy path": {GaugeMetricDataType, CumulativeAggregationType, pmetric.MetricDataTypeGauge, pmetric.MetricAggregationTemporalityCumulative, true, false}, + "Invalid data type": {"invalid", CumulativeAggregationType, pmetric.MetricDataTypeNone, pmetric.MetricAggregationTemporalityCumulative, true, true}, + "Invalid aggregation": {GaugeMetricDataType, "invalid", pmetric.MetricDataTypeGauge, pmetric.MetricAggregationTemporalityUnspecified, true, true}, } for name, testCase := range testCases { diff --git a/receiver/googlecloudspannerreceiver/internal/statsreader/statsreaders_mockedspanner_test.go b/receiver/googlecloudspannerreceiver/internal/statsreader/statsreaders_mockedspanner_test.go index ea8f441c4735..bdf0581bd579 100644 --- a/receiver/googlecloudspannerreceiver/internal/statsreader/statsreaders_mockedspanner_test.go +++ b/receiver/googlecloudspannerreceiver/internal/statsreader/statsreaders_mockedspanner_test.go @@ -24,7 +24,7 @@ import ( "cloud.google.com/go/spanner/spannertest" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" "google.golang.org/api/option" databasepb "google.golang.org/genproto/googleapis/spanner/admin/database/v1" @@ -50,7 +50,7 @@ func createMetricsMetadataFromTimestampColumn(query string, timestampColumn stri // Labels queryLabelValuesMetadata := []metadata.LabelValueMetadata{labelValueMetadata} - metricDataType := metadata.NewMetricDataType(pdata.MetricDataTypeGauge, pdata.MetricAggregationTemporalityUnspecified, false) + metricDataType := metadata.NewMetricDataType(pmetric.MetricDataTypeGauge, pmetric.MetricAggregationTemporalityUnspecified, false) metricValueMetadata, _ := metadata.NewMetricValueMetadata("metric_value", "METRIC_VALUE", metricDataType, "unit", metadata.IntValueType) diff --git a/receiver/googlecloudspannerreceiver/receiver.go b/receiver/googlecloudspannerreceiver/receiver.go index 3f6141d21f30..2488bd8db22e 100644 --- a/receiver/googlecloudspannerreceiver/receiver.go +++ b/receiver/googlecloudspannerreceiver/receiver.go @@ -20,7 +20,7 @@ import ( "fmt" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/model/pdata" + 
"go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/datasource" @@ -50,13 +50,13 @@ func newGoogleCloudSpannerReceiver(logger *zap.Logger, config *Config) *googleCl } } -func (r *googleCloudSpannerReceiver) Scrape(ctx context.Context) (pdata.Metrics, error) { +func (r *googleCloudSpannerReceiver) Scrape(ctx context.Context) (pmetric.Metrics, error) { var allMetricsDataPoints []*metadata.MetricsDataPoint for _, projectReader := range r.projectReaders { dataPoints, err := projectReader.Read(ctx) if err != nil { - return pdata.Metrics{}, err + return pmetric.Metrics{}, err } allMetricsDataPoints = append(allMetricsDataPoints, dataPoints...) diff --git a/receiver/googlecloudspannerreceiver/receiver_test.go b/receiver/googlecloudspannerreceiver/receiver_test.go index 2e38a9c9d71b..eb32897974ca 100644 --- a/receiver/googlecloudspannerreceiver/receiver_test.go +++ b/receiver/googlecloudspannerreceiver/receiver_test.go @@ -23,7 +23,7 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component/componenttest" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap/zaptest" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/metadata" @@ -62,8 +62,8 @@ func newMetricsBuilder(throwErrorOnShutdown bool) metadata.MetricsBuilder { } } -func (b *metricsBuilder) Build([]*metadata.MetricsDataPoint) (pdata.Metrics, error) { - return pdata.Metrics{}, nil +func (b *metricsBuilder) Build([]*metadata.MetricsDataPoint) (pmetric.Metrics, error) { + return pmetric.Metrics{}, nil } func (b *metricsBuilder) Shutdown() error { diff --git a/receiver/hostmetricsreceiver/go.mod b/receiver/hostmetricsreceiver/go.mod index 5aed0a35bbeb..d406560ccead 100644 --- a/receiver/hostmetricsreceiver/go.mod +++ b/receiver/hostmetricsreceiver/go.mod @@ -7,8 +7,9 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.48.0 github.com/shirou/gopsutil/v3 v3.22.3 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/zap v1.21.0 golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 @@ -19,7 +20,7 @@ require ( github.com/go-ole/go-ole v1.2.6 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/text v0.2.0 // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect @@ -28,7 +29,6 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/rogpeppe/go-internal v1.6.1 // indirect - github.com/spf13/cast v1.4.1 // indirect github.com/stretchr/objx v0.1.1 // indirect github.com/tklauser/go-sysconf v0.3.10 // indirect 
github.com/tklauser/numcpus v0.4.0 // indirect @@ -44,3 +44,5 @@ require ( ) replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal => ../../internal/coreinternal + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/receiver/hostmetricsreceiver/go.sum b/receiver/hostmetricsreceiver/go.sum index 2ea5da7488ba..106eef54932b 100644 --- a/receiver/hostmetricsreceiver/go.sum +++ b/receiver/hostmetricsreceiver/go.sum @@ -18,7 +18,7 @@ github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZx github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= @@ -79,7 +79,6 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -110,8 +109,8 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -175,8 +174,6 @@ github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIH github.com/shirou/gopsutil/v3 v3.22.3 
h1:UebRzEomgMpv61e3hgD1tGooqX5trFbdU/ehphbHd00= github.com/shirou/gopsutil/v3 v3.22.3/go.mod h1:D01hZJ4pVHPpCTZ3m3T2+wDF2YAGfd+H4ifUguaQzHM= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -199,17 +196,19 @@ github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPR github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -246,7 +245,7 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net 
v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= diff --git a/receiver/hostmetricsreceiver/hostmetrics_receiver_test.go b/receiver/hostmetricsreceiver/hostmetrics_receiver_test.go index 453f0be38328..73534f02cc6e 100644 --- a/receiver/hostmetricsreceiver/hostmetrics_receiver_test.go +++ b/receiver/hostmetricsreceiver/hostmetrics_receiver_test.go @@ -29,8 +29,8 @@ import ( "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumertest" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.9.0" + "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/receiver/scraperhelper" "go.uber.org/zap" @@ -145,7 +145,7 @@ func TestGatherMetrics_EndToEnd(t *testing.T) { }, waitFor, tick, "No metrics were collected after %v", waitFor) } -func assertIncludesExpectedMetrics(t *testing.T, got pdata.Metrics) { +func assertIncludesExpectedMetrics(t *testing.T, got pmetric.Metrics) { // get the superset of metrics returned by all resource metrics (excluding the first) returnedMetrics := make(map[string]struct{}) returnedResourceMetrics := make(map[string]struct{}) @@ -183,13 +183,13 @@ func assertIncludesExpectedMetrics(t *testing.T, got pdata.Metrics) { } } -func getMetricSlice(t *testing.T, rm pdata.ResourceMetrics) pdata.MetricSlice { +func getMetricSlice(t *testing.T, rm pmetric.ResourceMetrics) pmetric.MetricSlice { ilms := rm.ScopeMetrics() require.Equal(t, 1, ilms.Len()) return ilms.At(0).Metrics() } -func getReturnedMetricNames(metrics pdata.MetricSlice) map[string]struct{} { +func getReturnedMetricNames(metrics pmetric.MetricSlice) map[string]struct{} { metricNames := make(map[string]struct{}) for i := 0; i < metrics.Len(); i++ { metricNames[metrics.At(i).Name()] = struct{}{} @@ -219,8 +219,8 @@ func (m *mockFactory) CreateMetricsScraper(context.Context, *zap.Logger, interna func (m *mockScraper) ID() config.ComponentID { return config.NewComponentID("") } func (m *mockScraper) Start(context.Context, component.Host) error { return nil } func (m *mockScraper) Shutdown(context.Context) error { return nil } -func (m *mockScraper) Scrape(context.Context) (pdata.Metrics, error) { - return pdata.NewMetrics(), errors.New("err1") +func (m *mockScraper) Scrape(context.Context) (pmetric.Metrics, error) { + return pmetric.NewMetrics(), errors.New("err1") } func TestGatherMetrics_ScraperKeyConfigError(t *testing.T) { @@ -253,7 +253,7 @@ func (s *notifyingSink) Capabilities() consumer.Capabilities { return consumer.Capabilities{MutatesData: false} } -func (s *notifyingSink) ConsumeMetrics(_ context.Context, md pdata.Metrics) error { +func (s *notifyingSink) ConsumeMetrics(_ context.Context, md pmetric.Metrics) error { if md.MetricCount() > 0 { s.receivedMetrics = true } diff --git a/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper.go 
b/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper.go index 0d3bd9b287b6..f3b2bbbfa82a 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper.go +++ b/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper.go @@ -21,7 +21,8 @@ import ( "github.com/shirou/gopsutil/v3/cpu" "github.com/shirou/gopsutil/v3/host" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/receiver/scrapererror" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/internal/metadata" @@ -52,15 +53,15 @@ func (s *scraper) start(context.Context, component.Host) error { if err != nil { return err } - s.mb = metadata.NewMetricsBuilder(s.config.Metrics, metadata.WithStartTime(pdata.Timestamp(bootTime*1e9))) + s.mb = metadata.NewMetricsBuilder(s.config.Metrics, metadata.WithStartTime(pcommon.Timestamp(bootTime*1e9))) return nil } -func (s *scraper) scrape(_ context.Context) (pdata.Metrics, error) { - now := pdata.NewTimestampFromTime(s.now()) +func (s *scraper) scrape(_ context.Context) (pmetric.Metrics, error) { + now := pcommon.NewTimestampFromTime(s.now()) cpuTimes, err := s.times( /*percpu=*/ true) if err != nil { - return pdata.NewMetrics(), scrapererror.NewPartialScrapeError(err, metricsLen) + return pmetric.NewMetrics(), scrapererror.NewPartialScrapeError(err, metricsLen) } for _, cpuTime := range cpuTimes { @@ -69,7 +70,7 @@ func (s *scraper) scrape(_ context.Context) (pdata.Metrics, error) { err = s.ucal.CalculateAndRecord(now, cpuTimes, s.recordCPUUtilization) if err != nil { - return pdata.NewMetrics(), scrapererror.NewPartialScrapeError(err, metricsLen) + return pmetric.NewMetrics(), scrapererror.NewPartialScrapeError(err, metricsLen) } return s.mb.Emit(), nil diff --git a/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper_linux.go b/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper_linux.go index 1d528c071471..232a1c8ddcb2 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper_linux.go +++ b/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper_linux.go @@ -19,13 +19,13 @@ package cpuscraper // import "github.com/open-telemetry/opentelemetry-collector- import ( "github.com/shirou/gopsutil/v3/cpu" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/internal/metadata" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/ucal" ) -func (s *scraper) recordCPUTimeStateDataPoints(now pdata.Timestamp, cpuTime cpu.TimesStat) { +func (s *scraper) recordCPUTimeStateDataPoints(now pcommon.Timestamp, cpuTime cpu.TimesStat) { s.mb.RecordSystemCPUTimeDataPoint(now, cpuTime.User, cpuTime.CPU, metadata.AttributeState.User) s.mb.RecordSystemCPUTimeDataPoint(now, cpuTime.System, cpuTime.CPU, metadata.AttributeState.System) s.mb.RecordSystemCPUTimeDataPoint(now, cpuTime.Idle, cpuTime.CPU, metadata.AttributeState.Idle) @@ -36,7 +36,7 @@ func (s *scraper) recordCPUTimeStateDataPoints(now pdata.Timestamp, cpuTime cpu. 
s.mb.RecordSystemCPUTimeDataPoint(now, cpuTime.Iowait, cpuTime.CPU, metadata.AttributeState.Wait) } -func (s *scraper) recordCPUUtilization(now pdata.Timestamp, cpuUtilization ucal.CPUUtilization) { +func (s *scraper) recordCPUUtilization(now pcommon.Timestamp, cpuUtilization ucal.CPUUtilization) { s.mb.RecordSystemCPUUtilizationDataPoint(now, cpuUtilization.User, cpuUtilization.CPU, metadata.AttributeState.User) s.mb.RecordSystemCPUUtilizationDataPoint(now, cpuUtilization.System, cpuUtilization.CPU, metadata.AttributeState.System) s.mb.RecordSystemCPUUtilizationDataPoint(now, cpuUtilization.Idle, cpuUtilization.CPU, metadata.AttributeState.Idle) diff --git a/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper_others.go b/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper_others.go index 6f58ea23986a..74a50e0a7105 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper_others.go +++ b/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper_others.go @@ -19,20 +19,20 @@ package cpuscraper // import "github.com/open-telemetry/opentelemetry-collector- import ( "github.com/shirou/gopsutil/v3/cpu" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/internal/metadata" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/ucal" ) -func (s *scraper) recordCPUTimeStateDataPoints(now pdata.Timestamp, cpuTime cpu.TimesStat) { +func (s *scraper) recordCPUTimeStateDataPoints(now pcommon.Timestamp, cpuTime cpu.TimesStat) { s.mb.RecordSystemCPUTimeDataPoint(now, cpuTime.User, cpuTime.CPU, metadata.AttributeState.User) s.mb.RecordSystemCPUTimeDataPoint(now, cpuTime.System, cpuTime.CPU, metadata.AttributeState.System) s.mb.RecordSystemCPUTimeDataPoint(now, cpuTime.Idle, cpuTime.CPU, metadata.AttributeState.Idle) s.mb.RecordSystemCPUTimeDataPoint(now, cpuTime.Irq, cpuTime.CPU, metadata.AttributeState.Interrupt) } -func (s *scraper) recordCPUUtilization(now pdata.Timestamp, cpuUtilization ucal.CPUUtilization) { +func (s *scraper) recordCPUUtilization(now pcommon.Timestamp, cpuUtilization ucal.CPUUtilization) { s.mb.RecordSystemCPUUtilizationDataPoint(now, cpuUtilization.User, cpuUtilization.CPU, metadata.AttributeState.User) s.mb.RecordSystemCPUUtilizationDataPoint(now, cpuUtilization.System, cpuUtilization.CPU, metadata.AttributeState.System) s.mb.RecordSystemCPUUtilizationDataPoint(now, cpuUtilization.Idle, cpuUtilization.CPU, metadata.AttributeState.Idle) diff --git a/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper_test.go b/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper_test.go index c7dbcaf3f755..13351104aaac 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper_test.go +++ b/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/cpu_scraper_test.go @@ -25,7 +25,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component/componenttest" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/receiver/scrapererror" 
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal" @@ -39,7 +40,7 @@ func TestScrape(t *testing.T) { timesFunc func(bool) ([]cpu.TimesStat, error) metricsConfig metadata.MetricsSettings expectedMetricCount int - expectedStartTime pdata.Timestamp + expectedStartTime pcommon.Timestamp initializationErr string expectedErr string } @@ -328,7 +329,7 @@ func TestScrape_CpuUtilizationStandard(t *testing.T) { assert.Equal(t, expectedDataPoints, dp.Len()) //remove empty values to make the test more simple - dp.RemoveIf(func(n pdata.NumberDataPoint) bool { + dp.RemoveIf(func(n pmetric.NumberDataPoint) bool { return n.DoubleVal() == 0.0 }) @@ -338,7 +339,7 @@ func TestScrape_CpuUtilizationStandard(t *testing.T) { } } -func assertDatapointValueAndStringAttributes(t *testing.T, dp pdata.NumberDataPoint, value float64, attrs map[string]string) { +func assertDatapointValueAndStringAttributes(t *testing.T, dp pmetric.NumberDataPoint, value float64, attrs map[string]string) { assert.InDelta(t, value, dp.DoubleVal(), 0.0001) for k, v := range attrs { cpuAttribute, exists := dp.Attributes().Get(k) @@ -347,51 +348,51 @@ func assertDatapointValueAndStringAttributes(t *testing.T, dp pdata.NumberDataPo } } -func assertCPUMetricValid(t *testing.T, metric pdata.Metric, startTime pdata.Timestamp) { - expected := pdata.NewMetric() +func assertCPUMetricValid(t *testing.T, metric pmetric.Metric, startTime pcommon.Timestamp) { + expected := pmetric.NewMetric() expected.SetName("system.cpu.time") expected.SetDescription("Total CPU seconds broken down by different states.") expected.SetUnit("s") - expected.SetDataType(pdata.MetricDataTypeSum) + expected.SetDataType(pmetric.MetricDataTypeSum) internal.AssertDescriptorEqual(t, expected, metric) if startTime != 0 { internal.AssertSumMetricStartTimeEquals(t, metric, startTime) } assert.GreaterOrEqual(t, metric.Sum().DataPoints().Len(), 4*runtime.NumCPU()) internal.AssertSumMetricHasAttribute(t, metric, 0, metadata.Attributes.Cpu) - internal.AssertSumMetricHasAttributeValue(t, metric, 0, metadata.Attributes.State, pdata.NewValueString(metadata.AttributeState.User)) - internal.AssertSumMetricHasAttributeValue(t, metric, 1, metadata.Attributes.State, pdata.NewValueString(metadata.AttributeState.System)) - internal.AssertSumMetricHasAttributeValue(t, metric, 2, metadata.Attributes.State, pdata.NewValueString(metadata.AttributeState.Idle)) - internal.AssertSumMetricHasAttributeValue(t, metric, 3, metadata.Attributes.State, pdata.NewValueString(metadata.AttributeState.Interrupt)) + internal.AssertSumMetricHasAttributeValue(t, metric, 0, metadata.Attributes.State, pcommon.NewValueString(metadata.AttributeState.User)) + internal.AssertSumMetricHasAttributeValue(t, metric, 1, metadata.Attributes.State, pcommon.NewValueString(metadata.AttributeState.System)) + internal.AssertSumMetricHasAttributeValue(t, metric, 2, metadata.Attributes.State, pcommon.NewValueString(metadata.AttributeState.Idle)) + internal.AssertSumMetricHasAttributeValue(t, metric, 3, metadata.Attributes.State, pcommon.NewValueString(metadata.AttributeState.Interrupt)) } -func assertCPUMetricHasLinuxSpecificStateLabels(t *testing.T, metric pdata.Metric) { - internal.AssertSumMetricHasAttributeValue(t, metric, 4, metadata.Attributes.State, pdata.NewValueString(metadata.AttributeState.Nice)) - internal.AssertSumMetricHasAttributeValue(t, metric, 5, metadata.Attributes.State, pdata.NewValueString(metadata.AttributeState.Softirq)) - 
internal.AssertSumMetricHasAttributeValue(t, metric, 6, metadata.Attributes.State, pdata.NewValueString(metadata.AttributeState.Steal)) - internal.AssertSumMetricHasAttributeValue(t, metric, 7, metadata.Attributes.State, pdata.NewValueString(metadata.AttributeState.Wait)) +func assertCPUMetricHasLinuxSpecificStateLabels(t *testing.T, metric pmetric.Metric) { + internal.AssertSumMetricHasAttributeValue(t, metric, 4, metadata.Attributes.State, pcommon.NewValueString(metadata.AttributeState.Nice)) + internal.AssertSumMetricHasAttributeValue(t, metric, 5, metadata.Attributes.State, pcommon.NewValueString(metadata.AttributeState.Softirq)) + internal.AssertSumMetricHasAttributeValue(t, metric, 6, metadata.Attributes.State, pcommon.NewValueString(metadata.AttributeState.Steal)) + internal.AssertSumMetricHasAttributeValue(t, metric, 7, metadata.Attributes.State, pcommon.NewValueString(metadata.AttributeState.Wait)) } -func assertCPUUtilizationMetricValid(t *testing.T, metric pdata.Metric, startTime pdata.Timestamp) { - expected := pdata.NewMetric() +func assertCPUUtilizationMetricValid(t *testing.T, metric pmetric.Metric, startTime pcommon.Timestamp) { + expected := pmetric.NewMetric() expected.SetName("system.cpu.utilization") expected.SetDescription("Percentage of CPU time broken down by different states.") expected.SetUnit("1") - expected.SetDataType(pdata.MetricDataTypeGauge) + expected.SetDataType(pmetric.MetricDataTypeGauge) internal.AssertDescriptorEqual(t, expected, metric) if startTime != 0 { internal.AssertGaugeMetricStartTimeEquals(t, metric, startTime) } internal.AssertGaugeMetricHasAttribute(t, metric, 0, metadata.Attributes.Cpu) - internal.AssertGaugeMetricHasAttributeValue(t, metric, 0, metadata.Attributes.State, pdata.NewValueString(metadata.AttributeState.User)) - internal.AssertGaugeMetricHasAttributeValue(t, metric, 1, metadata.Attributes.State, pdata.NewValueString(metadata.AttributeState.System)) - internal.AssertGaugeMetricHasAttributeValue(t, metric, 2, metadata.Attributes.State, pdata.NewValueString(metadata.AttributeState.Idle)) - internal.AssertGaugeMetricHasAttributeValue(t, metric, 3, metadata.Attributes.State, pdata.NewValueString(metadata.AttributeState.Interrupt)) + internal.AssertGaugeMetricHasAttributeValue(t, metric, 0, metadata.Attributes.State, pcommon.NewValueString(metadata.AttributeState.User)) + internal.AssertGaugeMetricHasAttributeValue(t, metric, 1, metadata.Attributes.State, pcommon.NewValueString(metadata.AttributeState.System)) + internal.AssertGaugeMetricHasAttributeValue(t, metric, 2, metadata.Attributes.State, pcommon.NewValueString(metadata.AttributeState.Idle)) + internal.AssertGaugeMetricHasAttributeValue(t, metric, 3, metadata.Attributes.State, pcommon.NewValueString(metadata.AttributeState.Interrupt)) } -func assertCPUUtilizationMetricHasLinuxSpecificStateLabels(t *testing.T, metric pdata.Metric) { - internal.AssertGaugeMetricHasAttributeValue(t, metric, 4, metadata.Attributes.State, pdata.NewValueString(metadata.AttributeState.Nice)) - internal.AssertGaugeMetricHasAttributeValue(t, metric, 5, metadata.Attributes.State, pdata.NewValueString(metadata.AttributeState.Softirq)) - internal.AssertGaugeMetricHasAttributeValue(t, metric, 6, metadata.Attributes.State, pdata.NewValueString(metadata.AttributeState.Steal)) - internal.AssertGaugeMetricHasAttributeValue(t, metric, 7, metadata.Attributes.State, pdata.NewValueString(metadata.AttributeState.Wait)) +func assertCPUUtilizationMetricHasLinuxSpecificStateLabels(t *testing.T, metric pmetric.Metric) { 
+ internal.AssertGaugeMetricHasAttributeValue(t, metric, 4, metadata.Attributes.State, pcommon.NewValueString(metadata.AttributeState.Nice)) + internal.AssertGaugeMetricHasAttributeValue(t, metric, 5, metadata.Attributes.State, pcommon.NewValueString(metadata.AttributeState.Softirq)) + internal.AssertGaugeMetricHasAttributeValue(t, metric, 6, metadata.Attributes.State, pcommon.NewValueString(metadata.AttributeState.Steal)) + internal.AssertGaugeMetricHasAttributeValue(t, metric, 7, metadata.Attributes.State, pcommon.NewValueString(metadata.AttributeState.Wait)) } diff --git a/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/internal/metadata/generated_metrics_v2.go b/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/internal/metadata/generated_metrics_v2.go index 5c067a44f6ae..7e740dc28cd2 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/internal/metadata/generated_metrics_v2.go +++ b/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/internal/metadata/generated_metrics_v2.go @@ -5,8 +5,9 @@ package metadata import ( "time" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.9.0" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" ) // MetricSettings provides common settings for a particular metric. @@ -32,7 +33,7 @@ func DefaultMetricsSettings() MetricsSettings { } type metricSystemCPUTime struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -42,13 +43,13 @@ func (m *metricSystemCPUTime) init() { m.data.SetName("system.cpu.time") m.data.SetDescription("Total CPU seconds broken down by different states.") m.data.SetUnit("s") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricSystemCPUTime) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, cpuAttributeValue string, stateAttributeValue string) { +func (m *metricSystemCPUTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, cpuAttributeValue string, stateAttributeValue string) { if !m.settings.Enabled { return } @@ -56,8 +57,8 @@ func (m *metricSystemCPUTime) recordDataPoint(start pdata.Timestamp, ts pdata.Ti dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.Cpu, pdata.NewValueString(cpuAttributeValue)) - dp.Attributes().Insert(A.State, pdata.NewValueString(stateAttributeValue)) + dp.Attributes().Insert(A.Cpu, pcommon.NewValueString(cpuAttributeValue)) + dp.Attributes().Insert(A.State, pcommon.NewValueString(stateAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -68,7 +69,7 @@ func (m *metricSystemCPUTime) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricSystemCPUTime) emit(metrics pdata.MetricSlice) { +func (m *metricSystemCPUTime) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -79,14 +80,14 @@ func (m *metricSystemCPUTime) emit(metrics pdata.MetricSlice) { func newMetricSystemCPUTime(settings MetricSettings) metricSystemCPUTime { m := metricSystemCPUTime{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricSystemCPUUtilization struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -96,11 +97,11 @@ func (m *metricSystemCPUUtilization) init() { m.data.SetName("system.cpu.utilization") m.data.SetDescription("Percentage of CPU time broken down by different states.") m.data.SetUnit("1") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricSystemCPUUtilization) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, cpuAttributeValue string, stateAttributeValue string) { +func (m *metricSystemCPUUtilization) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, cpuAttributeValue string, stateAttributeValue string) { if !m.settings.Enabled { return } @@ -108,8 +109,8 @@ func (m *metricSystemCPUUtilization) recordDataPoint(start pdata.Timestamp, ts p dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.Cpu, pdata.NewValueString(cpuAttributeValue)) - dp.Attributes().Insert(A.State, pdata.NewValueString(stateAttributeValue)) + dp.Attributes().Insert(A.Cpu, pcommon.NewValueString(cpuAttributeValue)) + dp.Attributes().Insert(A.State, pcommon.NewValueString(stateAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -120,7 +121,7 @@ func (m *metricSystemCPUUtilization) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricSystemCPUUtilization) emit(metrics pdata.MetricSlice) { +func (m *metricSystemCPUUtilization) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -131,7 +132,7 @@ func (m *metricSystemCPUUtilization) emit(metrics pdata.MetricSlice) { func newMetricSystemCPUUtilization(settings MetricSettings) metricSystemCPUUtilization { m := metricSystemCPUUtilization{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m @@ -140,10 +141,10 @@ func newMetricSystemCPUUtilization(settings MetricSettings) metricSystemCPUUtili // MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations // required to produce metric representation defined in metadata and user settings. type MetricsBuilder struct { - startTime pdata.Timestamp // start time that will be applied to all recorded data points. - metricsCapacity int // maximum observed number of metrics per resource. - resourceCapacity int // maximum observed number of resource attributes. 
- metricsBuffer pdata.Metrics // accumulates metrics data before emitting. + startTime pcommon.Timestamp // start time that will be applied to all recorded data points. + metricsCapacity int // maximum observed number of metrics per resource. + resourceCapacity int // maximum observed number of resource attributes. + metricsBuffer pmetric.Metrics // accumulates metrics data before emitting. metricSystemCPUTime metricSystemCPUTime metricSystemCPUUtilization metricSystemCPUUtilization } @@ -152,7 +153,7 @@ type MetricsBuilder struct { type metricBuilderOption func(*MetricsBuilder) // WithStartTime sets startTime on the metrics builder. -func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { +func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption { return func(mb *MetricsBuilder) { mb.startTime = startTime } @@ -160,8 +161,8 @@ func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ - startTime: pdata.NewTimestampFromTime(time.Now()), - metricsBuffer: pdata.NewMetrics(), + startTime: pcommon.NewTimestampFromTime(time.Now()), + metricsBuffer: pmetric.NewMetrics(), metricSystemCPUTime: newMetricSystemCPUTime(settings.SystemCPUTime), metricSystemCPUUtilization: newMetricSystemCPUUtilization(settings.SystemCPUUtilization), } @@ -172,7 +173,7 @@ func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) } // updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity. -func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { +func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) { if mb.metricsCapacity < rm.ScopeMetrics().At(0).Metrics().Len() { mb.metricsCapacity = rm.ScopeMetrics().At(0).Metrics().Len() } @@ -182,14 +183,14 @@ func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { } // ResourceOption applies changes to provided resource. -type ResourceOption func(pdata.Resource) +type ResourceOption func(pcommon.Resource) // EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for // recording another set of data points as part of another resource. This function can be helpful when one scraper // needs to emit metrics from several resources. Otherwise calling this function is not required, // just `Emit` function can be called instead. Resource attributes should be provided as ResourceOption arguments. func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { - rm := pdata.NewResourceMetrics() + rm := pmetric.NewResourceMetrics() rm.SetSchemaUrl(conventions.SchemaURL) rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity) for _, op := range ro { @@ -209,27 +210,27 @@ func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { // Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for // recording another set of metrics. This function will be responsible for applying all the transformations required to // produce metric representation defined in metadata and user settings, e.g. delta or cumulative. -func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pdata.Metrics { +func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pmetric.Metrics { mb.EmitForResource(ro...) 
- metrics := pdata.NewMetrics() + metrics := pmetric.NewMetrics() mb.metricsBuffer.MoveTo(metrics) return metrics } // RecordSystemCPUTimeDataPoint adds a data point to system.cpu.time metric. -func (mb *MetricsBuilder) RecordSystemCPUTimeDataPoint(ts pdata.Timestamp, val float64, cpuAttributeValue string, stateAttributeValue string) { +func (mb *MetricsBuilder) RecordSystemCPUTimeDataPoint(ts pcommon.Timestamp, val float64, cpuAttributeValue string, stateAttributeValue string) { mb.metricSystemCPUTime.recordDataPoint(mb.startTime, ts, val, cpuAttributeValue, stateAttributeValue) } // RecordSystemCPUUtilizationDataPoint adds a data point to system.cpu.utilization metric. -func (mb *MetricsBuilder) RecordSystemCPUUtilizationDataPoint(ts pdata.Timestamp, val float64, cpuAttributeValue string, stateAttributeValue string) { +func (mb *MetricsBuilder) RecordSystemCPUUtilizationDataPoint(ts pcommon.Timestamp, val float64, cpuAttributeValue string, stateAttributeValue string) { mb.metricSystemCPUUtilization.recordDataPoint(mb.startTime, ts, val, cpuAttributeValue, stateAttributeValue) } // Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted, // and metrics builder should update its startTime and reset it's internal state accordingly. func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { - mb.startTime = pdata.NewTimestampFromTime(time.Now()) + mb.startTime = pcommon.NewTimestampFromTime(time.Now()) for _, op := range options { op(mb) } diff --git a/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/ucal/cpu_utilization_calculator.go b/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/ucal/cpu_utilization_calculator.go index 52f7faff4f2f..27f91ec45d5c 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/ucal/cpu_utilization_calculator.go +++ b/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/ucal/cpu_utilization_calculator.go @@ -19,7 +19,7 @@ import ( "fmt" "github.com/shirou/gopsutil/v3/cpu" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" ) var ErrTimeStatNotFound = errors.New("cannot find TimesStat for cpu") @@ -46,7 +46,7 @@ type CPUUtilizationCalculator struct { // CalculateAndRecord calculates the cpu utilization for the different cpu states comparing previously // stored []cpu.TimesStat and time.Time and current []cpu.TimesStat and current time.Time // If no previous data is stored it will return empty slice of CPUUtilization and no error -func (c *CPUUtilizationCalculator) CalculateAndRecord(now pdata.Timestamp, cpuTimes []cpu.TimesStat, recorder func(pdata.Timestamp, CPUUtilization)) error { +func (c *CPUUtilizationCalculator) CalculateAndRecord(now pcommon.Timestamp, cpuTimes []cpu.TimesStat, recorder func(pcommon.Timestamp, CPUUtilization)) error { if c.previousCPUTimes != nil { for _, previousCPUTime := range c.previousCPUTimes { currentCPUTime, err := cpuTimeForCPU(previousCPUTime.CPU, cpuTimes) diff --git a/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/ucal/cpu_utilization_calculator_test.go b/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/ucal/cpu_utilization_calculator_test.go index 1dfcdb78dcbb..46ba827d3e90 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/ucal/cpu_utilization_calculator_test.go +++ b/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/ucal/cpu_utilization_calculator_test.go @@ -19,14 +19,14 @@ import ( "github.com/shirou/gopsutil/v3/cpu" 
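
// --- Illustrative sketch, not part of the patch. Driving ucal.CPUUtilizationCalculator after the
// signature change: the first CalculateAndRecord call only seeds previous CPU times, the second
// invokes the recorder once per CPU. CPUUtilization is used opaquely (its fields are not shown in
// this diff), the zero-value calculator relies on the nil check visible above, and the ucal import
// path is internal to the receiver, so this compiles only from within that module.
package ucal_example

import (
	"time"

	"github.com/shirou/gopsutil/v3/cpu"
	"go.opentelemetry.io/collector/pdata/pcommon"

	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/cpuscraper/ucal"
)

func sampleUtilizationTwice() ([]ucal.CPUUtilization, error) {
	var calc ucal.CPUUtilizationCalculator
	var recorded []ucal.CPUUtilization
	record := func(_ pcommon.Timestamp, u ucal.CPUUtilization) {
		recorded = append(recorded, u) // same idea as the test's inMemoryRecorder
	}

	for i := 0; i < 2; i++ {
		cpuTimes, err := cpu.Times(true) // per-CPU times from gopsutil
		if err != nil {
			return nil, err
		}
		if err := calc.CalculateAndRecord(pcommon.NewTimestampFromTime(time.Now()), cpuTimes, record); err != nil {
			return nil, err
		}
		time.Sleep(100 * time.Millisecond) // let some CPU time accumulate between samples
	}
	return recorded, nil
}
// --- end sketch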
"github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" ) type inMemoryRecorder struct { cpuUtilizations []CPUUtilization } -func (r *inMemoryRecorder) record(_ pdata.Timestamp, utilization CPUUtilization) { +func (r *inMemoryRecorder) record(_ pcommon.Timestamp, utilization CPUUtilization) { r.cpuUtilizations = append(r.cpuUtilizations, utilization) } @@ -34,7 +34,7 @@ func TestCpuUtilizationCalculator_Calculate(t *testing.T) { t.Parallel() testCases := []struct { name string - now pdata.Timestamp + now pcommon.Timestamp cpuTimes []cpu.TimesStat previousCPUTimes []cpu.TimesStat expectedUtilizations []CPUUtilization diff --git a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others.go b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others.go index 951891ee7777..4433a6d697ce 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others.go +++ b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others.go @@ -25,7 +25,8 @@ import ( "github.com/shirou/gopsutil/v3/disk" "github.com/shirou/gopsutil/v3/host" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/receiver/scrapererror" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/processor/filterset" @@ -40,7 +41,7 @@ const ( // scraper for Disk Metrics type scraper struct { config *Config - startTime pdata.Timestamp + startTime pcommon.Timestamp mb *metadata.MetricsBuilder includeFS filterset.FilterSet excludeFS filterset.FilterSet @@ -79,16 +80,16 @@ func (s *scraper) start(context.Context, component.Host) error { return err } - s.startTime = pdata.Timestamp(bootTime * 1e9) + s.startTime = pcommon.Timestamp(bootTime * 1e9) s.mb = metadata.NewMetricsBuilder(s.config.Metrics, metadata.WithStartTime(s.startTime)) return nil } -func (s *scraper) scrape(_ context.Context) (pdata.Metrics, error) { - now := pdata.NewTimestampFromTime(time.Now()) +func (s *scraper) scrape(_ context.Context) (pmetric.Metrics, error) { + now := pcommon.NewTimestampFromTime(time.Now()) ioCounters, err := s.ioCounters() if err != nil { - return pdata.NewMetrics(), scrapererror.NewPartialScrapeError(err, metricsLen) + return pmetric.NewMetrics(), scrapererror.NewPartialScrapeError(err, metricsLen) } // filter devices by name @@ -106,34 +107,34 @@ func (s *scraper) scrape(_ context.Context) (pdata.Metrics, error) { return s.mb.Emit(), nil } -func (s *scraper) recordDiskIOMetric(now pdata.Timestamp, ioCounters map[string]disk.IOCountersStat) { +func (s *scraper) recordDiskIOMetric(now pcommon.Timestamp, ioCounters map[string]disk.IOCountersStat) { for device, ioCounter := range ioCounters { s.mb.RecordSystemDiskIoDataPoint(now, int64(ioCounter.ReadBytes), device, metadata.AttributeDirection.Read) s.mb.RecordSystemDiskIoDataPoint(now, int64(ioCounter.WriteBytes), device, metadata.AttributeDirection.Write) } } -func (s *scraper) recordDiskOperationsMetric(now pdata.Timestamp, ioCounters map[string]disk.IOCountersStat) { +func (s *scraper) recordDiskOperationsMetric(now pcommon.Timestamp, ioCounters map[string]disk.IOCountersStat) { for device, ioCounter := range ioCounters { s.mb.RecordSystemDiskOperationsDataPoint(now, int64(ioCounter.ReadCount), device, 
metadata.AttributeDirection.Read) s.mb.RecordSystemDiskOperationsDataPoint(now, int64(ioCounter.WriteCount), device, metadata.AttributeDirection.Write) } } -func (s *scraper) recordDiskIOTimeMetric(now pdata.Timestamp, ioCounters map[string]disk.IOCountersStat) { +func (s *scraper) recordDiskIOTimeMetric(now pcommon.Timestamp, ioCounters map[string]disk.IOCountersStat) { for device, ioCounter := range ioCounters { s.mb.RecordSystemDiskIoTimeDataPoint(now, float64(ioCounter.IoTime)/1e3, device) } } -func (s *scraper) recordDiskOperationTimeMetric(now pdata.Timestamp, ioCounters map[string]disk.IOCountersStat) { +func (s *scraper) recordDiskOperationTimeMetric(now pcommon.Timestamp, ioCounters map[string]disk.IOCountersStat) { for device, ioCounter := range ioCounters { s.mb.RecordSystemDiskOperationTimeDataPoint(now, float64(ioCounter.ReadTime)/1e3, device, metadata.AttributeDirection.Read) s.mb.RecordSystemDiskOperationTimeDataPoint(now, float64(ioCounter.WriteTime)/1e3, device, metadata.AttributeDirection.Write) } } -func (s *scraper) recordDiskPendingOperationsMetric(now pdata.Timestamp, ioCounters map[string]disk.IOCountersStat) { +func (s *scraper) recordDiskPendingOperationsMetric(now pcommon.Timestamp, ioCounters map[string]disk.IOCountersStat) { for device, ioCounter := range ioCounters { s.mb.RecordSystemDiskPendingOperationsDataPoint(now, int64(ioCounter.IopsInProgress), device) } diff --git a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others_fallback.go b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others_fallback.go index a248e2acfdc2..977f86cb71e4 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others_fallback.go +++ b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others_fallback.go @@ -19,10 +19,10 @@ package diskscraper // import "github.com/open-telemetry/opentelemetry-collector import ( "github.com/shirou/gopsutil/v3/disk" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" ) const systemSpecificMetricsLen = 0 -func (s *scraper) recordSystemSpecificDataPoints(now pdata.Timestamp, ioCounters map[string]disk.IOCountersStat) { +func (s *scraper) recordSystemSpecificDataPoints(now pcommon.Timestamp, ioCounters map[string]disk.IOCountersStat) { } diff --git a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others_linux.go b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others_linux.go index 4aafedeaa716..cc19343fbeef 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others_linux.go +++ b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_others_linux.go @@ -19,25 +19,25 @@ package diskscraper // import "github.com/open-telemetry/opentelemetry-collector import ( "github.com/shirou/gopsutil/v3/disk" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/diskscraper/internal/metadata" ) const systemSpecificMetricsLen = 2 -func (s *scraper) recordSystemSpecificDataPoints(now pdata.Timestamp, ioCounters map[string]disk.IOCountersStat) { +func (s *scraper) recordSystemSpecificDataPoints(now pcommon.Timestamp, ioCounters map[string]disk.IOCountersStat) { s.recordDiskWeightedIOTimeMetric(now, ioCounters) s.recordDiskMergedMetric(now, ioCounters) } 
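
// --- Illustrative sketch, not part of the patch. The values fed into the Record*DataPoint calls
// above come from gopsutil, keyed by device name, and its IoTime/ReadTime/WriteTime counters are
// reported in milliseconds, which is what the /1e3 conversions in this file turn into the metric's
// "s" unit. Hypothetical standalone probe:
package diskio_example

import (
	"fmt"

	"github.com/shirou/gopsutil/v3/disk"
)

func printDiskCounters() error {
	counters, err := disk.IOCounters() // map[deviceName]disk.IOCountersStat
	if err != nil {
		return err
	}
	for device, c := range counters {
		ioSeconds := float64(c.IoTime) / 1e3 // ms -> s, as in recordDiskIOTimeMetric
		fmt.Printf("%s read=%dB write=%dB io_time=%.3fs pending=%d\n",
			device, c.ReadBytes, c.WriteBytes, ioSeconds, c.IopsInProgress)
	}
	return nil
}
// --- end sketch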
-func (s *scraper) recordDiskWeightedIOTimeMetric(now pdata.Timestamp, ioCounters map[string]disk.IOCountersStat) { +func (s *scraper) recordDiskWeightedIOTimeMetric(now pcommon.Timestamp, ioCounters map[string]disk.IOCountersStat) { for device, ioCounter := range ioCounters { s.mb.RecordSystemDiskWeightedIoTimeDataPoint(now, float64(ioCounter.WeightedIO)/1e3, device) } } -func (s *scraper) recordDiskMergedMetric(now pdata.Timestamp, ioCounters map[string]disk.IOCountersStat) { +func (s *scraper) recordDiskMergedMetric(now pcommon.Timestamp, ioCounters map[string]disk.IOCountersStat) { for device, ioCounter := range ioCounters { s.mb.RecordSystemDiskMergedDataPoint(now, int64(ioCounter.MergedReadCount), device, metadata.AttributeDirection.Read) s.mb.RecordSystemDiskMergedDataPoint(now, int64(ioCounter.MergedWriteCount), device, metadata.AttributeDirection.Write) diff --git a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_test.go b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_test.go index 8366a1f82735..cb47e2f0c4ba 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_test.go +++ b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_test.go @@ -22,7 +22,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component/componenttest" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/processor/filterset" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal" @@ -37,7 +38,7 @@ func TestScrape(t *testing.T) { newErrRegex string initializationErr string expectMetrics int - expectedStartTime pdata.Timestamp + expectedStartTime pcommon.Timestamp } testCases := []testCase{ @@ -161,7 +162,7 @@ func TestScrape(t *testing.T) { } } -func assertInt64DiskMetricValid(t *testing.T, metric pdata.Metric, startTime pdata.Timestamp) { +func assertInt64DiskMetricValid(t *testing.T, metric pmetric.Metric, startTime pcommon.Timestamp) { if startTime != 0 { internal.AssertSumMetricStartTimeEquals(t, metric, startTime) } @@ -169,11 +170,11 @@ func assertInt64DiskMetricValid(t *testing.T, metric pdata.Metric, startTime pda assert.GreaterOrEqual(t, metric.Sum().DataPoints().Len(), 2) internal.AssertSumMetricHasAttribute(t, metric, 0, "device") - internal.AssertSumMetricHasAttributeValue(t, metric, 0, "direction", pdata.NewValueString(metadata.AttributeDirection.Read)) - internal.AssertSumMetricHasAttributeValue(t, metric, 1, "direction", pdata.NewValueString(metadata.AttributeDirection.Write)) + internal.AssertSumMetricHasAttributeValue(t, metric, 0, "direction", pcommon.NewValueString(metadata.AttributeDirection.Read)) + internal.AssertSumMetricHasAttributeValue(t, metric, 1, "direction", pcommon.NewValueString(metadata.AttributeDirection.Write)) } -func assertDoubleDiskMetricValid(t *testing.T, metric pdata.Metric, expectDirectionLabels bool, startTime pdata.Timestamp) { +func assertDoubleDiskMetricValid(t *testing.T, metric pmetric.Metric, expectDirectionLabels bool, startTime pcommon.Timestamp) { if startTime != 0 { internal.AssertSumMetricStartTimeEquals(t, metric, startTime) } @@ -186,12 +187,12 @@ func assertDoubleDiskMetricValid(t *testing.T, metric pdata.Metric, expectDirect 
internal.AssertSumMetricHasAttribute(t, metric, 0, "device") if expectDirectionLabels { - internal.AssertSumMetricHasAttributeValue(t, metric, 0, "direction", pdata.NewValueString(metadata.AttributeDirection.Read)) - internal.AssertSumMetricHasAttributeValue(t, metric, metric.Sum().DataPoints().Len()-1, "direction", pdata.NewValueString(metadata.AttributeDirection.Write)) + internal.AssertSumMetricHasAttributeValue(t, metric, 0, "direction", pcommon.NewValueString(metadata.AttributeDirection.Read)) + internal.AssertSumMetricHasAttributeValue(t, metric, metric.Sum().DataPoints().Len()-1, "direction", pcommon.NewValueString(metadata.AttributeDirection.Write)) } } -func assertDiskPendingOperationsMetricValid(t *testing.T, metric pdata.Metric) { +func assertDiskPendingOperationsMetricValid(t *testing.T, metric pmetric.Metric) { assert.GreaterOrEqual(t, metric.Sum().DataPoints().Len(), 1) internal.AssertSumMetricHasAttribute(t, metric, 0, "device") } diff --git a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_windows.go b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_windows.go index 1179983fa565..610038b4f94a 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_windows.go +++ b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/disk_scraper_windows.go @@ -21,7 +21,8 @@ import ( "github.com/shirou/gopsutil/v3/host" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/receiver/scrapererror" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/processor/filterset" @@ -51,7 +52,7 @@ const ( // scraper for Disk Metrics type scraper struct { config *Config - startTime pdata.Timestamp + startTime pcommon.Timestamp mb *metadata.MetricsBuilder includeFS filterset.FilterSet excludeFS filterset.FilterSet @@ -91,23 +92,23 @@ func (s *scraper) start(context.Context, component.Host) error { return err } - s.startTime = pdata.Timestamp(bootTime * 1e9) + s.startTime = pcommon.Timestamp(bootTime * 1e9) s.mb = metadata.NewMetricsBuilder(s.config.Metrics, metadata.WithStartTime(s.startTime)) return s.perfCounterScraper.Initialize(logicalDisk) } -func (s *scraper) scrape(ctx context.Context) (pdata.Metrics, error) { - now := pdata.NewTimestampFromTime(time.Now()) +func (s *scraper) scrape(ctx context.Context) (pmetric.Metrics, error) { + now := pcommon.NewTimestampFromTime(time.Now()) counters, err := s.perfCounterScraper.Scrape() if err != nil { - return pdata.NewMetrics(), scrapererror.NewPartialScrapeError(err, metricsLen) + return pmetric.NewMetrics(), scrapererror.NewPartialScrapeError(err, metricsLen) } logicalDiskObject, err := counters.GetObject(logicalDisk) if err != nil { - return pdata.NewMetrics(), scrapererror.NewPartialScrapeError(err, metricsLen) + return pmetric.NewMetrics(), scrapererror.NewPartialScrapeError(err, metricsLen) } // filter devices by name @@ -115,7 +116,7 @@ func (s *scraper) scrape(ctx context.Context) (pdata.Metrics, error) { logicalDiskCounterValues, err := logicalDiskObject.GetValues(readsPerSec, writesPerSec, readBytesPerSec, writeBytesPerSec, idleTime, avgDiskSecsPerRead, avgDiskSecsPerWrite, queueLength) if err != nil { - return pdata.NewMetrics(), scrapererror.NewPartialScrapeError(err, metricsLen) + return pmetric.NewMetrics(), scrapererror.NewPartialScrapeError(err, metricsLen) } if 
len(logicalDiskCounterValues) > 0 { @@ -129,35 +130,35 @@ func (s *scraper) scrape(ctx context.Context) (pdata.Metrics, error) { return s.mb.Emit(), nil } -func (s *scraper) recordDiskIOMetric(now pdata.Timestamp, logicalDiskCounterValues []*perfcounters.CounterValues) { +func (s *scraper) recordDiskIOMetric(now pcommon.Timestamp, logicalDiskCounterValues []*perfcounters.CounterValues) { for _, logicalDiskCounter := range logicalDiskCounterValues { s.mb.RecordSystemDiskIoDataPoint(now, logicalDiskCounter.Values[readBytesPerSec], logicalDiskCounter.InstanceName, metadata.AttributeDirection.Read) s.mb.RecordSystemDiskIoDataPoint(now, logicalDiskCounter.Values[writeBytesPerSec], logicalDiskCounter.InstanceName, metadata.AttributeDirection.Write) } } -func (s *scraper) recordDiskOperationsMetric(now pdata.Timestamp, logicalDiskCounterValues []*perfcounters.CounterValues) { +func (s *scraper) recordDiskOperationsMetric(now pcommon.Timestamp, logicalDiskCounterValues []*perfcounters.CounterValues) { for _, logicalDiskCounter := range logicalDiskCounterValues { s.mb.RecordSystemDiskOperationsDataPoint(now, logicalDiskCounter.Values[readsPerSec], logicalDiskCounter.InstanceName, metadata.AttributeDirection.Read) s.mb.RecordSystemDiskOperationsDataPoint(now, logicalDiskCounter.Values[writesPerSec], logicalDiskCounter.InstanceName, metadata.AttributeDirection.Write) } } -func (s *scraper) recordDiskIOTimeMetric(now pdata.Timestamp, logicalDiskCounterValues []*perfcounters.CounterValues) { +func (s *scraper) recordDiskIOTimeMetric(now pcommon.Timestamp, logicalDiskCounterValues []*perfcounters.CounterValues) { for _, logicalDiskCounter := range logicalDiskCounterValues { // disk active time = system boot time - disk idle time s.mb.RecordSystemDiskIoTimeDataPoint(now, float64(now-s.startTime)/1e9-float64(logicalDiskCounter.Values[idleTime])/1e7, logicalDiskCounter.InstanceName) } } -func (s *scraper) recordDiskOperationTimeMetric(now pdata.Timestamp, logicalDiskCounterValues []*perfcounters.CounterValues) { +func (s *scraper) recordDiskOperationTimeMetric(now pcommon.Timestamp, logicalDiskCounterValues []*perfcounters.CounterValues) { for _, logicalDiskCounter := range logicalDiskCounterValues { s.mb.RecordSystemDiskOperationTimeDataPoint(now, float64(logicalDiskCounter.Values[avgDiskSecsPerRead])/1e7, logicalDiskCounter.InstanceName, metadata.AttributeDirection.Read) s.mb.RecordSystemDiskOperationTimeDataPoint(now, float64(logicalDiskCounter.Values[avgDiskSecsPerWrite])/1e7, logicalDiskCounter.InstanceName, metadata.AttributeDirection.Write) } } -func (s *scraper) recordDiskPendingOperationsMetric(now pdata.Timestamp, logicalDiskCounterValues []*perfcounters.CounterValues) { +func (s *scraper) recordDiskPendingOperationsMetric(now pcommon.Timestamp, logicalDiskCounterValues []*perfcounters.CounterValues) { for _, logicalDiskCounter := range logicalDiskCounterValues { s.mb.RecordSystemDiskPendingOperationsDataPoint(now, logicalDiskCounter.Values[queueLength], logicalDiskCounter.InstanceName) } diff --git a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/internal/metadata/generated_metrics_v2.go b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/internal/metadata/generated_metrics_v2.go index 582d838dff2f..6f2fb1d61b49 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/diskscraper/internal/metadata/generated_metrics_v2.go +++ b/receiver/hostmetricsreceiver/internal/scraper/diskscraper/internal/metadata/generated_metrics_v2.go @@ -5,8 +5,9 @@ package metadata import ( "time" - 
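
// --- Illustrative sketch, not part of the patch. The Windows io_time calculation above mixes two
// time bases: pcommon.Timestamp is nanoseconds, so (now-startTime)/1e9 is seconds of uptime, while
// the LogicalDisk idle-time counter is divided by 1e7, i.e. treated as 100ns ticks (an assumption
// inferred from that divisor). A hypothetical helper spelling the arithmetic out:
package diskwindows_example

import "go.opentelemetry.io/collector/pdata/pcommon"

// activeDiskSeconds mirrors the "uptime minus idle time" arithmetic used for system.disk.io_time.
func activeDiskSeconds(now, bootTime pcommon.Timestamp, idleTimeTicks int64) float64 {
	uptimeSeconds := float64(now-bootTime) / 1e9 // Timestamp is ns since the Unix epoch
	idleSeconds := float64(idleTimeTicks) / 1e7  // 100ns ticks -> s, matching the /1e7 above
	return uptimeSeconds - idleSeconds
}
// --- end sketch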
"go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.9.0" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" ) // MetricSettings provides common settings for a particular metric. @@ -52,7 +53,7 @@ func DefaultMetricsSettings() MetricsSettings { } type metricSystemDiskIo struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -62,13 +63,13 @@ func (m *metricSystemDiskIo) init() { m.data.SetName("system.disk.io") m.data.SetDescription("Disk bytes transferred.") m.data.SetUnit("By") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricSystemDiskIo) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, deviceAttributeValue string, directionAttributeValue string) { +func (m *metricSystemDiskIo) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceAttributeValue string, directionAttributeValue string) { if !m.settings.Enabled { return } @@ -76,8 +77,8 @@ func (m *metricSystemDiskIo) recordDataPoint(start pdata.Timestamp, ts pdata.Tim dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.Device, pdata.NewValueString(deviceAttributeValue)) - dp.Attributes().Insert(A.Direction, pdata.NewValueString(directionAttributeValue)) + dp.Attributes().Insert(A.Device, pcommon.NewValueString(deviceAttributeValue)) + dp.Attributes().Insert(A.Direction, pcommon.NewValueString(directionAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -88,7 +89,7 @@ func (m *metricSystemDiskIo) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricSystemDiskIo) emit(metrics pdata.MetricSlice) { +func (m *metricSystemDiskIo) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -99,14 +100,14 @@ func (m *metricSystemDiskIo) emit(metrics pdata.MetricSlice) { func newMetricSystemDiskIo(settings MetricSettings) metricSystemDiskIo { m := metricSystemDiskIo{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricSystemDiskIoTime struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -116,13 +117,13 @@ func (m *metricSystemDiskIoTime) init() { m.data.SetName("system.disk.io_time") m.data.SetDescription("Time disk spent activated. 
On Windows, this is calculated as the inverse of disk idle time.") m.data.SetUnit("s") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricSystemDiskIoTime) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, deviceAttributeValue string) { +func (m *metricSystemDiskIoTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, deviceAttributeValue string) { if !m.settings.Enabled { return } @@ -130,7 +131,7 @@ func (m *metricSystemDiskIoTime) recordDataPoint(start pdata.Timestamp, ts pdata dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.Device, pdata.NewValueString(deviceAttributeValue)) + dp.Attributes().Insert(A.Device, pcommon.NewValueString(deviceAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -141,7 +142,7 @@ func (m *metricSystemDiskIoTime) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricSystemDiskIoTime) emit(metrics pdata.MetricSlice) { +func (m *metricSystemDiskIoTime) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -152,14 +153,14 @@ func (m *metricSystemDiskIoTime) emit(metrics pdata.MetricSlice) { func newMetricSystemDiskIoTime(settings MetricSettings) metricSystemDiskIoTime { m := metricSystemDiskIoTime{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricSystemDiskMerged struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -169,13 +170,13 @@ func (m *metricSystemDiskMerged) init() { m.data.SetName("system.disk.merged") m.data.SetDescription("The number of disk reads merged into single physical disk access operations.") m.data.SetUnit("{operations}") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricSystemDiskMerged) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, deviceAttributeValue string, directionAttributeValue string) { +func (m *metricSystemDiskMerged) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceAttributeValue string, directionAttributeValue string) { if !m.settings.Enabled { return } @@ -183,8 +184,8 @@ func (m *metricSystemDiskMerged) recordDataPoint(start pdata.Timestamp, ts pdata dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.Device, pdata.NewValueString(deviceAttributeValue)) - dp.Attributes().Insert(A.Direction, pdata.NewValueString(directionAttributeValue)) + dp.Attributes().Insert(A.Device, pcommon.NewValueString(deviceAttributeValue)) + dp.Attributes().Insert(A.Direction, pcommon.NewValueString(directionAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -195,7 +196,7 @@ func (m *metricSystemDiskMerged) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricSystemDiskMerged) emit(metrics pdata.MetricSlice) { +func (m *metricSystemDiskMerged) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -206,14 +207,14 @@ func (m *metricSystemDiskMerged) emit(metrics pdata.MetricSlice) { func newMetricSystemDiskMerged(settings MetricSettings) metricSystemDiskMerged { m := metricSystemDiskMerged{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricSystemDiskOperationTime struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -223,13 +224,13 @@ func (m *metricSystemDiskOperationTime) init() { m.data.SetName("system.disk.operation_time") m.data.SetDescription("Time spent in disk operations.") m.data.SetUnit("s") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricSystemDiskOperationTime) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, deviceAttributeValue string, directionAttributeValue string) { +func (m *metricSystemDiskOperationTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, deviceAttributeValue string, directionAttributeValue string) { if !m.settings.Enabled { return } @@ -237,8 +238,8 @@ func (m *metricSystemDiskOperationTime) recordDataPoint(start pdata.Timestamp, t dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.Device, pdata.NewValueString(deviceAttributeValue)) - dp.Attributes().Insert(A.Direction, pdata.NewValueString(directionAttributeValue)) + dp.Attributes().Insert(A.Device, pcommon.NewValueString(deviceAttributeValue)) + dp.Attributes().Insert(A.Direction, pcommon.NewValueString(directionAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -249,7 +250,7 @@ func (m *metricSystemDiskOperationTime) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricSystemDiskOperationTime) emit(metrics pdata.MetricSlice) { +func (m *metricSystemDiskOperationTime) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -260,14 +261,14 @@ func (m *metricSystemDiskOperationTime) emit(metrics pdata.MetricSlice) { func newMetricSystemDiskOperationTime(settings MetricSettings) metricSystemDiskOperationTime { m := metricSystemDiskOperationTime{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricSystemDiskOperations struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -277,13 +278,13 @@ func (m *metricSystemDiskOperations) init() { m.data.SetName("system.disk.operations") m.data.SetDescription("Disk operations count.") m.data.SetUnit("{operations}") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricSystemDiskOperations) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, deviceAttributeValue string, directionAttributeValue string) { +func (m *metricSystemDiskOperations) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceAttributeValue string, directionAttributeValue string) { if !m.settings.Enabled { return } @@ -291,8 +292,8 @@ func (m *metricSystemDiskOperations) recordDataPoint(start pdata.Timestamp, ts p dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.Device, pdata.NewValueString(deviceAttributeValue)) - dp.Attributes().Insert(A.Direction, pdata.NewValueString(directionAttributeValue)) + dp.Attributes().Insert(A.Device, pcommon.NewValueString(deviceAttributeValue)) + dp.Attributes().Insert(A.Direction, pcommon.NewValueString(directionAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -303,7 +304,7 @@ func (m *metricSystemDiskOperations) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricSystemDiskOperations) emit(metrics pdata.MetricSlice) { +func (m *metricSystemDiskOperations) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -314,14 +315,14 @@ func (m *metricSystemDiskOperations) emit(metrics pdata.MetricSlice) { func newMetricSystemDiskOperations(settings MetricSettings) metricSystemDiskOperations { m := metricSystemDiskOperations{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricSystemDiskPendingOperations struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -331,13 +332,13 @@ func (m *metricSystemDiskPendingOperations) init() { m.data.SetName("system.disk.pending_operations") m.data.SetDescription("The queue size of pending I/O operations.") m.data.SetUnit("{operations}") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricSystemDiskPendingOperations) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, deviceAttributeValue string) { +func (m *metricSystemDiskPendingOperations) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceAttributeValue string) { if !m.settings.Enabled { return } @@ -345,7 +346,7 @@ func (m *metricSystemDiskPendingOperations) recordDataPoint(start pdata.Timestam dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.Device, pdata.NewValueString(deviceAttributeValue)) + dp.Attributes().Insert(A.Device, pcommon.NewValueString(deviceAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -356,7 +357,7 @@ func (m *metricSystemDiskPendingOperations) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricSystemDiskPendingOperations) emit(metrics pdata.MetricSlice) { +func (m *metricSystemDiskPendingOperations) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -367,14 +368,14 @@ func (m *metricSystemDiskPendingOperations) emit(metrics pdata.MetricSlice) { func newMetricSystemDiskPendingOperations(settings MetricSettings) metricSystemDiskPendingOperations { m := metricSystemDiskPendingOperations{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricSystemDiskWeightedIoTime struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -384,13 +385,13 @@ func (m *metricSystemDiskWeightedIoTime) init() { m.data.SetName("system.disk.weighted_io_time") m.data.SetDescription("Time disk spent activated multiplied by the queue length.") m.data.SetUnit("s") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricSystemDiskWeightedIoTime) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, deviceAttributeValue string) { +func (m *metricSystemDiskWeightedIoTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, deviceAttributeValue string) { if !m.settings.Enabled { return } @@ -398,7 +399,7 @@ func (m *metricSystemDiskWeightedIoTime) recordDataPoint(start pdata.Timestamp, dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.Device, pdata.NewValueString(deviceAttributeValue)) + dp.Attributes().Insert(A.Device, pcommon.NewValueString(deviceAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -409,7 +410,7 @@ func (m *metricSystemDiskWeightedIoTime) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricSystemDiskWeightedIoTime) emit(metrics pdata.MetricSlice) { +func (m *metricSystemDiskWeightedIoTime) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -420,7 +421,7 @@ func (m *metricSystemDiskWeightedIoTime) emit(metrics pdata.MetricSlice) { func newMetricSystemDiskWeightedIoTime(settings MetricSettings) metricSystemDiskWeightedIoTime { m := metricSystemDiskWeightedIoTime{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m @@ -429,10 +430,10 @@ func newMetricSystemDiskWeightedIoTime(settings MetricSettings) metricSystemDisk // MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations // required to produce metric representation defined in metadata and user settings. type MetricsBuilder struct { - startTime pdata.Timestamp // start time that will be applied to all recorded data points. - metricsCapacity int // maximum observed number of metrics per resource. - resourceCapacity int // maximum observed number of resource attributes. - metricsBuffer pdata.Metrics // accumulates metrics data before emitting. + startTime pcommon.Timestamp // start time that will be applied to all recorded data points. + metricsCapacity int // maximum observed number of metrics per resource. + resourceCapacity int // maximum observed number of resource attributes. + metricsBuffer pmetric.Metrics // accumulates metrics data before emitting. metricSystemDiskIo metricSystemDiskIo metricSystemDiskIoTime metricSystemDiskIoTime metricSystemDiskMerged metricSystemDiskMerged @@ -446,7 +447,7 @@ type MetricsBuilder struct { type metricBuilderOption func(*MetricsBuilder) // WithStartTime sets startTime on the metrics builder. 
-func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { +func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption { return func(mb *MetricsBuilder) { mb.startTime = startTime } @@ -454,8 +455,8 @@ func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ - startTime: pdata.NewTimestampFromTime(time.Now()), - metricsBuffer: pdata.NewMetrics(), + startTime: pcommon.NewTimestampFromTime(time.Now()), + metricsBuffer: pmetric.NewMetrics(), metricSystemDiskIo: newMetricSystemDiskIo(settings.SystemDiskIo), metricSystemDiskIoTime: newMetricSystemDiskIoTime(settings.SystemDiskIoTime), metricSystemDiskMerged: newMetricSystemDiskMerged(settings.SystemDiskMerged), @@ -471,7 +472,7 @@ func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) } // updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity. -func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { +func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) { if mb.metricsCapacity < rm.ScopeMetrics().At(0).Metrics().Len() { mb.metricsCapacity = rm.ScopeMetrics().At(0).Metrics().Len() } @@ -481,14 +482,14 @@ func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { } // ResourceOption applies changes to provided resource. -type ResourceOption func(pdata.Resource) +type ResourceOption func(pcommon.Resource) // EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for // recording another set of data points as part of another resource. This function can be helpful when one scraper // needs to emit metrics from several resources. Otherwise calling this function is not required, // just `Emit` function can be called instead. Resource attributes should be provided as ResourceOption arguments. func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { - rm := pdata.NewResourceMetrics() + rm := pmetric.NewResourceMetrics() rm.SetSchemaUrl(conventions.SchemaURL) rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity) for _, op := range ro { @@ -513,52 +514,52 @@ func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { // Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for // recording another set of metrics. This function will be responsible for applying all the transformations required to // produce metric representation defined in metadata and user settings, e.g. delta or cumulative. -func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pdata.Metrics { +func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pmetric.Metrics { mb.EmitForResource(ro...) - metrics := pdata.NewMetrics() + metrics := pmetric.NewMetrics() mb.metricsBuffer.MoveTo(metrics) return metrics } // RecordSystemDiskIoDataPoint adds a data point to system.disk.io metric. -func (mb *MetricsBuilder) RecordSystemDiskIoDataPoint(ts pdata.Timestamp, val int64, deviceAttributeValue string, directionAttributeValue string) { +func (mb *MetricsBuilder) RecordSystemDiskIoDataPoint(ts pcommon.Timestamp, val int64, deviceAttributeValue string, directionAttributeValue string) { mb.metricSystemDiskIo.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue, directionAttributeValue) } // RecordSystemDiskIoTimeDataPoint adds a data point to system.disk.io_time metric. 
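
// --- Illustrative sketch, not part of the patch. How a scraper drives the generated builder with
// the new types: record data points against pcommon.Timestamp values, then pass ResourceOption
// closures (plain func(pcommon.Resource) values) to Emit. "host.name"/"example-host" and the byte
// count are made up, and the metadata import is internal to the receiver, so this is illustrative only.
package diskscraper_example

import (
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"

	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/diskscraper/internal/metadata"
)

func emitOneResource() pmetric.Metrics {
	mb := metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings())
	now := pcommon.NewTimestampFromTime(time.Now())
	mb.RecordSystemDiskIoDataPoint(now, 4096, "sda", metadata.AttributeDirection.Read)

	withHost := func(r pcommon.Resource) { // satisfies metadata.ResourceOption
		r.Attributes().Insert("host.name", pcommon.NewValueString("example-host"))
	}
	return mb.Emit(withHost) // moves the buffered metrics out, ready for the next scrape
}
// --- end sketch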
-func (mb *MetricsBuilder) RecordSystemDiskIoTimeDataPoint(ts pdata.Timestamp, val float64, deviceAttributeValue string) { +func (mb *MetricsBuilder) RecordSystemDiskIoTimeDataPoint(ts pcommon.Timestamp, val float64, deviceAttributeValue string) { mb.metricSystemDiskIoTime.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue) } // RecordSystemDiskMergedDataPoint adds a data point to system.disk.merged metric. -func (mb *MetricsBuilder) RecordSystemDiskMergedDataPoint(ts pdata.Timestamp, val int64, deviceAttributeValue string, directionAttributeValue string) { +func (mb *MetricsBuilder) RecordSystemDiskMergedDataPoint(ts pcommon.Timestamp, val int64, deviceAttributeValue string, directionAttributeValue string) { mb.metricSystemDiskMerged.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue, directionAttributeValue) } // RecordSystemDiskOperationTimeDataPoint adds a data point to system.disk.operation_time metric. -func (mb *MetricsBuilder) RecordSystemDiskOperationTimeDataPoint(ts pdata.Timestamp, val float64, deviceAttributeValue string, directionAttributeValue string) { +func (mb *MetricsBuilder) RecordSystemDiskOperationTimeDataPoint(ts pcommon.Timestamp, val float64, deviceAttributeValue string, directionAttributeValue string) { mb.metricSystemDiskOperationTime.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue, directionAttributeValue) } // RecordSystemDiskOperationsDataPoint adds a data point to system.disk.operations metric. -func (mb *MetricsBuilder) RecordSystemDiskOperationsDataPoint(ts pdata.Timestamp, val int64, deviceAttributeValue string, directionAttributeValue string) { +func (mb *MetricsBuilder) RecordSystemDiskOperationsDataPoint(ts pcommon.Timestamp, val int64, deviceAttributeValue string, directionAttributeValue string) { mb.metricSystemDiskOperations.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue, directionAttributeValue) } // RecordSystemDiskPendingOperationsDataPoint adds a data point to system.disk.pending_operations metric. -func (mb *MetricsBuilder) RecordSystemDiskPendingOperationsDataPoint(ts pdata.Timestamp, val int64, deviceAttributeValue string) { +func (mb *MetricsBuilder) RecordSystemDiskPendingOperationsDataPoint(ts pcommon.Timestamp, val int64, deviceAttributeValue string) { mb.metricSystemDiskPendingOperations.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue) } // RecordSystemDiskWeightedIoTimeDataPoint adds a data point to system.disk.weighted_io_time metric. -func (mb *MetricsBuilder) RecordSystemDiskWeightedIoTimeDataPoint(ts pdata.Timestamp, val float64, deviceAttributeValue string) { +func (mb *MetricsBuilder) RecordSystemDiskWeightedIoTimeDataPoint(ts pcommon.Timestamp, val float64, deviceAttributeValue string) { mb.metricSystemDiskWeightedIoTime.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue) } // Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted, // and metrics builder should update its startTime and reset it's internal state accordingly. 
func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { - mb.startTime = pdata.NewTimestampFromTime(time.Now()) + mb.startTime = pcommon.NewTimestampFromTime(time.Now()) for _, op := range options { op(mb) } diff --git a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper.go b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper.go index 736c2a17bfbf..5592fba7971b 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper.go +++ b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper.go @@ -22,7 +22,8 @@ import ( "github.com/shirou/gopsutil/v3/disk" "github.com/shirou/gopsutil/v3/host" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/receiver/scrapererror" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/internal/metadata" @@ -67,17 +68,17 @@ func (s *scraper) start(context.Context, component.Host) error { return err } - s.mb = metadata.NewMetricsBuilder(s.config.Metrics, metadata.WithStartTime(pdata.Timestamp(bootTime*1e9))) + s.mb = metadata.NewMetricsBuilder(s.config.Metrics, metadata.WithStartTime(pcommon.Timestamp(bootTime*1e9))) return nil } -func (s *scraper) scrape(_ context.Context) (pdata.Metrics, error) { - now := pdata.NewTimestampFromTime(time.Now()) +func (s *scraper) scrape(_ context.Context) (pmetric.Metrics, error) { + now := pcommon.NewTimestampFromTime(time.Now()) // omit logical (virtual) filesystems (not relevant for windows) partitions, err := s.partitions( /*all=*/ false) if err != nil { - return pdata.NewMetrics(), scrapererror.NewPartialScrapeError(err, metricsLen) + return pmetric.NewMetrics(), scrapererror.NewPartialScrapeError(err, metricsLen) } var errors scrapererror.ScrapeErrors diff --git a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_others.go b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_others.go index add319f8b2bf..4b09c10f2199 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_others.go +++ b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_others.go @@ -18,14 +18,14 @@ package filesystemscraper // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper" import ( - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/internal/metadata" ) const fileSystemStatesLen = 2 -func (s *scraper) recordFileSystemUsageMetric(now pdata.Timestamp, deviceUsages []*deviceUsage) { +func (s *scraper) recordFileSystemUsageMetric(now pcommon.Timestamp, deviceUsages []*deviceUsage) { for _, deviceUsage := range deviceUsages { s.mb.RecordSystemFilesystemUsageDataPoint( now, int64(deviceUsage.usage.Used), @@ -46,5 +46,5 @@ func (s *scraper) recordFileSystemUsageMetric(now pdata.Timestamp, deviceUsages const systemSpecificMetricsLen = 0 -func (s *scraper) recordSystemSpecificMetrics(now pdata.Timestamp, deviceUsages []*deviceUsage) { +func (s *scraper) recordSystemSpecificMetrics(now 
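
// --- Illustrative sketch, not part of the patch. WithStartTime above is seeded from the host boot
// time: host.BootTime returns Unix seconds as uint64 and pcommon.Timestamp is Unix nanoseconds,
// hence the *1e9 in the scrapers. Minimal standalone version of that conversion:
package boottime_example

import (
	"github.com/shirou/gopsutil/v3/host"
	"go.opentelemetry.io/collector/pdata/pcommon"
)

func bootTimestamp() (pcommon.Timestamp, error) {
	bootTime, err := host.BootTime() // seconds since the Unix epoch
	if err != nil {
		return 0, err
	}
	return pcommon.Timestamp(bootTime * 1e9), nil // same s -> ns conversion as the scrapers above
}
// --- end sketch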
pcommon.Timestamp, deviceUsages []*deviceUsage) { } diff --git a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_test.go b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_test.go index d573d1bcbca5..f1e7d00a89df 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_test.go +++ b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_test.go @@ -25,7 +25,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component/componenttest" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/receiver/scrapererror" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/processor/filterset" @@ -42,7 +43,7 @@ func TestScrape(t *testing.T) { usageFunc func(string) (*disk.UsageStat, error) expectMetrics bool expectedDeviceDataPoints int - expectedDeviceAttributes []map[string]pdata.Value + expectedDeviceAttributes []map[string]pcommon.Value newErrRegex string initializationErr string expectedErr string @@ -131,18 +132,18 @@ func TestScrape(t *testing.T) { }, expectMetrics: true, expectedDeviceDataPoints: 2, - expectedDeviceAttributes: []map[string]pdata.Value{ + expectedDeviceAttributes: []map[string]pcommon.Value{ { - "device": pdata.NewValueString("device_a"), - "mountpoint": pdata.NewValueString("mount_point_a"), - "type": pdata.NewValueString("fs_type_a"), - "mode": pdata.NewValueString("unknown"), + "device": pcommon.NewValueString("device_a"), + "mountpoint": pcommon.NewValueString("mount_point_a"), + "type": pcommon.NewValueString("fs_type_a"), + "mode": pcommon.NewValueString("unknown"), }, { - "device": pdata.NewValueString("device_b"), - "mountpoint": pdata.NewValueString("mount_point_d"), - "type": pdata.NewValueString("fs_type_c"), - "mode": pdata.NewValueString("unknown"), + "device": pcommon.NewValueString("device_b"), + "mountpoint": pcommon.NewValueString("mount_point_d"), + "type": pcommon.NewValueString("fs_type_c"), + "mode": pcommon.NewValueString("unknown"), }, }, }, @@ -281,20 +282,20 @@ func TestScrape(t *testing.T) { } } -func findMetricByName(metrics pdata.MetricSlice, name string) (pdata.Metric, error) { +func findMetricByName(metrics pmetric.MetricSlice, name string) (pmetric.Metric, error) { for i := 0; i < metrics.Len(); i++ { if metrics.At(i).Name() == name { return metrics.At(i), nil } } - return pdata.Metric{}, fmt.Errorf("no metric found with name %s", name) + return pmetric.Metric{}, fmt.Errorf("no metric found with name %s", name) } func assertFileSystemUsageMetricValid( t *testing.T, - metric pdata.Metric, + metric pmetric.Metric, expectedDeviceDataPoints int, - expectedDeviceAttributes []map[string]pdata.Value) { + expectedDeviceAttributes []map[string]pcommon.Value) { for i := 0; i < metric.Sum().DataPoints().Len(); i++ { for _, label := range []string{"device", "type", "mode", "mountpoint"} { internal.AssertSumMetricHasAttribute(t, metric, i, label) @@ -320,12 +321,12 @@ func assertFileSystemUsageMetricValid( } else { assert.GreaterOrEqual(t, metric.Sum().DataPoints().Len(), fileSystemStatesLen) } - internal.AssertSumMetricHasAttributeValue(t, metric, 0, "state", pdata.NewValueString(metadata.AttributeState.Used)) - internal.AssertSumMetricHasAttributeValue(t, metric, 1, "state", 
pdata.NewValueString(metadata.AttributeState.Free)) + internal.AssertSumMetricHasAttributeValue(t, metric, 0, "state", pcommon.NewValueString(metadata.AttributeState.Used)) + internal.AssertSumMetricHasAttributeValue(t, metric, 1, "state", pcommon.NewValueString(metadata.AttributeState.Free)) } -func assertFileSystemUsageMetricHasUnixSpecificStateLabels(t *testing.T, metric pdata.Metric) { - internal.AssertSumMetricHasAttributeValue(t, metric, 2, "state", pdata.NewValueString(metadata.AttributeState.Reserved)) +func assertFileSystemUsageMetricHasUnixSpecificStateLabels(t *testing.T, metric pmetric.Metric) { + internal.AssertSumMetricHasAttributeValue(t, metric, 2, "state", pcommon.NewValueString(metadata.AttributeState.Reserved)) } func isUnix() bool { diff --git a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_unix.go b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_unix.go index 28d47f4c5431..3e8857bfac5d 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_unix.go +++ b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_unix.go @@ -18,14 +18,14 @@ package filesystemscraper // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper" import ( - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/internal/metadata" ) const fileSystemStatesLen = 3 -func (s *scraper) recordFileSystemUsageMetric(now pdata.Timestamp, deviceUsages []*deviceUsage) { +func (s *scraper) recordFileSystemUsageMetric(now pcommon.Timestamp, deviceUsages []*deviceUsage) { for _, deviceUsage := range deviceUsages { s.mb.RecordSystemFilesystemUsageDataPoint( now, int64(deviceUsage.usage.Used), @@ -51,7 +51,7 @@ func (s *scraper) recordFileSystemUsageMetric(now pdata.Timestamp, deviceUsages const systemSpecificMetricsLen = 1 -func (s *scraper) recordSystemSpecificMetrics(now pdata.Timestamp, deviceUsages []*deviceUsage) { +func (s *scraper) recordSystemSpecificMetrics(now pcommon.Timestamp, deviceUsages []*deviceUsage) { for _, deviceUsage := range deviceUsages { s.mb.RecordSystemFilesystemInodesUsageDataPoint( now, int64(deviceUsage.usage.InodesUsed), deviceUsage.partition.Device, diff --git a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/internal/metadata/generated_metrics_v2.go b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/internal/metadata/generated_metrics_v2.go index 4658ef6313e5..37f35b8825ee 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/internal/metadata/generated_metrics_v2.go +++ b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/internal/metadata/generated_metrics_v2.go @@ -5,8 +5,9 @@ package metadata import ( "time" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.9.0" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" ) // MetricSettings provides common settings for a particular metric. @@ -36,7 +37,7 @@ func DefaultMetricsSettings() MetricsSettings { } type metricSystemFilesystemInodesUsage struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. 
settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -46,13 +47,13 @@ func (m *metricSystemFilesystemInodesUsage) init() { m.data.SetName("system.filesystem.inodes.usage") m.data.SetDescription("FileSystem inodes used.") m.data.SetUnit("{inodes}") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricSystemFilesystemInodesUsage) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, deviceAttributeValue string, modeAttributeValue string, mountpointAttributeValue string, typeAttributeValue string, stateAttributeValue string) { +func (m *metricSystemFilesystemInodesUsage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceAttributeValue string, modeAttributeValue string, mountpointAttributeValue string, typeAttributeValue string, stateAttributeValue string) { if !m.settings.Enabled { return } @@ -60,11 +61,11 @@ func (m *metricSystemFilesystemInodesUsage) recordDataPoint(start pdata.Timestam dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.Device, pdata.NewValueString(deviceAttributeValue)) - dp.Attributes().Insert(A.Mode, pdata.NewValueString(modeAttributeValue)) - dp.Attributes().Insert(A.Mountpoint, pdata.NewValueString(mountpointAttributeValue)) - dp.Attributes().Insert(A.Type, pdata.NewValueString(typeAttributeValue)) - dp.Attributes().Insert(A.State, pdata.NewValueString(stateAttributeValue)) + dp.Attributes().Insert(A.Device, pcommon.NewValueString(deviceAttributeValue)) + dp.Attributes().Insert(A.Mode, pcommon.NewValueString(modeAttributeValue)) + dp.Attributes().Insert(A.Mountpoint, pcommon.NewValueString(mountpointAttributeValue)) + dp.Attributes().Insert(A.Type, pcommon.NewValueString(typeAttributeValue)) + dp.Attributes().Insert(A.State, pcommon.NewValueString(stateAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -75,7 +76,7 @@ func (m *metricSystemFilesystemInodesUsage) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricSystemFilesystemInodesUsage) emit(metrics pdata.MetricSlice) { +func (m *metricSystemFilesystemInodesUsage) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -86,14 +87,14 @@ func (m *metricSystemFilesystemInodesUsage) emit(metrics pdata.MetricSlice) { func newMetricSystemFilesystemInodesUsage(settings MetricSettings) metricSystemFilesystemInodesUsage { m := metricSystemFilesystemInodesUsage{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricSystemFilesystemUsage struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -103,13 +104,13 @@ func (m *metricSystemFilesystemUsage) init() { m.data.SetName("system.filesystem.usage") m.data.SetDescription("Filesystem bytes used.") m.data.SetUnit("By") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricSystemFilesystemUsage) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, deviceAttributeValue string, modeAttributeValue string, mountpointAttributeValue string, typeAttributeValue string, stateAttributeValue string) { +func (m *metricSystemFilesystemUsage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceAttributeValue string, modeAttributeValue string, mountpointAttributeValue string, typeAttributeValue string, stateAttributeValue string) { if !m.settings.Enabled { return } @@ -117,11 +118,11 @@ func (m *metricSystemFilesystemUsage) recordDataPoint(start pdata.Timestamp, ts dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.Device, pdata.NewValueString(deviceAttributeValue)) - dp.Attributes().Insert(A.Mode, pdata.NewValueString(modeAttributeValue)) - dp.Attributes().Insert(A.Mountpoint, pdata.NewValueString(mountpointAttributeValue)) - dp.Attributes().Insert(A.Type, pdata.NewValueString(typeAttributeValue)) - dp.Attributes().Insert(A.State, pdata.NewValueString(stateAttributeValue)) + dp.Attributes().Insert(A.Device, pcommon.NewValueString(deviceAttributeValue)) + dp.Attributes().Insert(A.Mode, pcommon.NewValueString(modeAttributeValue)) + dp.Attributes().Insert(A.Mountpoint, pcommon.NewValueString(mountpointAttributeValue)) + dp.Attributes().Insert(A.Type, pcommon.NewValueString(typeAttributeValue)) + dp.Attributes().Insert(A.State, pcommon.NewValueString(stateAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -132,7 +133,7 @@ func (m *metricSystemFilesystemUsage) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricSystemFilesystemUsage) emit(metrics pdata.MetricSlice) { +func (m *metricSystemFilesystemUsage) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -143,14 +144,14 @@ func (m *metricSystemFilesystemUsage) emit(metrics pdata.MetricSlice) { func newMetricSystemFilesystemUsage(settings MetricSettings) metricSystemFilesystemUsage { m := metricSystemFilesystemUsage{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricSystemFilesystemUtilization struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -160,11 +161,11 @@ func (m *metricSystemFilesystemUtilization) init() { m.data.SetName("system.filesystem.utilization") m.data.SetDescription("Fraction of filesystem bytes used.") m.data.SetUnit("1") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricSystemFilesystemUtilization) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, deviceAttributeValue string, modeAttributeValue string, mountpointAttributeValue string, typeAttributeValue string) { +func (m *metricSystemFilesystemUtilization) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, deviceAttributeValue string, modeAttributeValue string, mountpointAttributeValue string, typeAttributeValue string) { if !m.settings.Enabled { return } @@ -172,10 +173,10 @@ func (m *metricSystemFilesystemUtilization) recordDataPoint(start pdata.Timestam dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.Device, pdata.NewValueString(deviceAttributeValue)) - dp.Attributes().Insert(A.Mode, pdata.NewValueString(modeAttributeValue)) - dp.Attributes().Insert(A.Mountpoint, pdata.NewValueString(mountpointAttributeValue)) - dp.Attributes().Insert(A.Type, pdata.NewValueString(typeAttributeValue)) + dp.Attributes().Insert(A.Device, pcommon.NewValueString(deviceAttributeValue)) + dp.Attributes().Insert(A.Mode, pcommon.NewValueString(modeAttributeValue)) + dp.Attributes().Insert(A.Mountpoint, pcommon.NewValueString(mountpointAttributeValue)) + dp.Attributes().Insert(A.Type, pcommon.NewValueString(typeAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -186,7 +187,7 @@ func (m *metricSystemFilesystemUtilization) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricSystemFilesystemUtilization) emit(metrics pdata.MetricSlice) { +func (m *metricSystemFilesystemUtilization) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -197,7 +198,7 @@ func (m *metricSystemFilesystemUtilization) emit(metrics pdata.MetricSlice) { func newMetricSystemFilesystemUtilization(settings MetricSettings) metricSystemFilesystemUtilization { m := metricSystemFilesystemUtilization{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m @@ -206,10 +207,10 @@ func newMetricSystemFilesystemUtilization(settings MetricSettings) metricSystemF // MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations // required to produce metric representation defined in metadata and user settings. type MetricsBuilder struct { - startTime pdata.Timestamp // start time that will be applied to all recorded data points. - metricsCapacity int // maximum observed number of metrics per resource. - resourceCapacity int // maximum observed number of resource attributes. - metricsBuffer pdata.Metrics // accumulates metrics data before emitting. + startTime pcommon.Timestamp // start time that will be applied to all recorded data points. + metricsCapacity int // maximum observed number of metrics per resource. + resourceCapacity int // maximum observed number of resource attributes. 
+ metricsBuffer pmetric.Metrics // accumulates metrics data before emitting. metricSystemFilesystemInodesUsage metricSystemFilesystemInodesUsage metricSystemFilesystemUsage metricSystemFilesystemUsage metricSystemFilesystemUtilization metricSystemFilesystemUtilization @@ -219,7 +220,7 @@ type MetricsBuilder struct { type metricBuilderOption func(*MetricsBuilder) // WithStartTime sets startTime on the metrics builder. -func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { +func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption { return func(mb *MetricsBuilder) { mb.startTime = startTime } @@ -227,8 +228,8 @@ func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ - startTime: pdata.NewTimestampFromTime(time.Now()), - metricsBuffer: pdata.NewMetrics(), + startTime: pcommon.NewTimestampFromTime(time.Now()), + metricsBuffer: pmetric.NewMetrics(), metricSystemFilesystemInodesUsage: newMetricSystemFilesystemInodesUsage(settings.SystemFilesystemInodesUsage), metricSystemFilesystemUsage: newMetricSystemFilesystemUsage(settings.SystemFilesystemUsage), metricSystemFilesystemUtilization: newMetricSystemFilesystemUtilization(settings.SystemFilesystemUtilization), @@ -240,7 +241,7 @@ func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) } // updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity. -func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { +func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) { if mb.metricsCapacity < rm.ScopeMetrics().At(0).Metrics().Len() { mb.metricsCapacity = rm.ScopeMetrics().At(0).Metrics().Len() } @@ -250,14 +251,14 @@ func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { } // ResourceOption applies changes to provided resource. -type ResourceOption func(pdata.Resource) +type ResourceOption func(pcommon.Resource) // EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for // recording another set of data points as part of another resource. This function can be helpful when one scraper // needs to emit metrics from several resources. Otherwise calling this function is not required, // just `Emit` function can be called instead. Resource attributes should be provided as ResourceOption arguments. func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { - rm := pdata.NewResourceMetrics() + rm := pmetric.NewResourceMetrics() rm.SetSchemaUrl(conventions.SchemaURL) rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity) for _, op := range ro { @@ -278,32 +279,32 @@ func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { // Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for // recording another set of metrics. This function will be responsible for applying all the transformations required to // produce metric representation defined in metadata and user settings, e.g. delta or cumulative. -func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pdata.Metrics { +func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pmetric.Metrics { mb.EmitForResource(ro...) 
- metrics := pdata.NewMetrics() + metrics := pmetric.NewMetrics() mb.metricsBuffer.MoveTo(metrics) return metrics } // RecordSystemFilesystemInodesUsageDataPoint adds a data point to system.filesystem.inodes.usage metric. -func (mb *MetricsBuilder) RecordSystemFilesystemInodesUsageDataPoint(ts pdata.Timestamp, val int64, deviceAttributeValue string, modeAttributeValue string, mountpointAttributeValue string, typeAttributeValue string, stateAttributeValue string) { +func (mb *MetricsBuilder) RecordSystemFilesystemInodesUsageDataPoint(ts pcommon.Timestamp, val int64, deviceAttributeValue string, modeAttributeValue string, mountpointAttributeValue string, typeAttributeValue string, stateAttributeValue string) { mb.metricSystemFilesystemInodesUsage.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue, modeAttributeValue, mountpointAttributeValue, typeAttributeValue, stateAttributeValue) } // RecordSystemFilesystemUsageDataPoint adds a data point to system.filesystem.usage metric. -func (mb *MetricsBuilder) RecordSystemFilesystemUsageDataPoint(ts pdata.Timestamp, val int64, deviceAttributeValue string, modeAttributeValue string, mountpointAttributeValue string, typeAttributeValue string, stateAttributeValue string) { +func (mb *MetricsBuilder) RecordSystemFilesystemUsageDataPoint(ts pcommon.Timestamp, val int64, deviceAttributeValue string, modeAttributeValue string, mountpointAttributeValue string, typeAttributeValue string, stateAttributeValue string) { mb.metricSystemFilesystemUsage.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue, modeAttributeValue, mountpointAttributeValue, typeAttributeValue, stateAttributeValue) } // RecordSystemFilesystemUtilizationDataPoint adds a data point to system.filesystem.utilization metric. -func (mb *MetricsBuilder) RecordSystemFilesystemUtilizationDataPoint(ts pdata.Timestamp, val float64, deviceAttributeValue string, modeAttributeValue string, mountpointAttributeValue string, typeAttributeValue string) { +func (mb *MetricsBuilder) RecordSystemFilesystemUtilizationDataPoint(ts pcommon.Timestamp, val float64, deviceAttributeValue string, modeAttributeValue string, mountpointAttributeValue string, typeAttributeValue string) { mb.metricSystemFilesystemUtilization.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue, modeAttributeValue, mountpointAttributeValue, typeAttributeValue) } // Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted, // and metrics builder should update its startTime and reset it's internal state accordingly. 
func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { - mb.startTime = pdata.NewTimestampFromTime(time.Now()) + mb.startTime = pcommon.NewTimestampFromTime(time.Now()) for _, op := range options { op(mb) } diff --git a/receiver/hostmetricsreceiver/internal/scraper/loadscraper/internal/metadata/generated_metrics_v2.go b/receiver/hostmetricsreceiver/internal/scraper/loadscraper/internal/metadata/generated_metrics_v2.go index 4922334dd715..499cfe6a3055 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/loadscraper/internal/metadata/generated_metrics_v2.go +++ b/receiver/hostmetricsreceiver/internal/scraper/loadscraper/internal/metadata/generated_metrics_v2.go @@ -5,8 +5,9 @@ package metadata import ( "time" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.9.0" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" ) // MetricSettings provides common settings for a particular metric. @@ -36,7 +37,7 @@ func DefaultMetricsSettings() MetricsSettings { } type metricSystemCPULoadAverage15m struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -46,10 +47,10 @@ func (m *metricSystemCPULoadAverage15m) init() { m.data.SetName("system.cpu.load_average.15m") m.data.SetDescription("Average CPU Load over 15 minutes.") m.data.SetUnit("1") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) } -func (m *metricSystemCPULoadAverage15m) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64) { +func (m *metricSystemCPULoadAverage15m) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { if !m.settings.Enabled { return } @@ -67,7 +68,7 @@ func (m *metricSystemCPULoadAverage15m) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricSystemCPULoadAverage15m) emit(metrics pdata.MetricSlice) { +func (m *metricSystemCPULoadAverage15m) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -78,14 +79,14 @@ func (m *metricSystemCPULoadAverage15m) emit(metrics pdata.MetricSlice) { func newMetricSystemCPULoadAverage15m(settings MetricSettings) metricSystemCPULoadAverage15m { m := metricSystemCPULoadAverage15m{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricSystemCPULoadAverage1m struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -95,10 +96,10 @@ func (m *metricSystemCPULoadAverage1m) init() { m.data.SetName("system.cpu.load_average.1m") m.data.SetDescription("Average CPU Load over 1 minute.") m.data.SetUnit("1") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) } -func (m *metricSystemCPULoadAverage1m) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64) { +func (m *metricSystemCPULoadAverage1m) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { if !m.settings.Enabled { return } @@ -116,7 +117,7 @@ func (m *metricSystemCPULoadAverage1m) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricSystemCPULoadAverage1m) emit(metrics pdata.MetricSlice) { +func (m *metricSystemCPULoadAverage1m) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -127,14 +128,14 @@ func (m *metricSystemCPULoadAverage1m) emit(metrics pdata.MetricSlice) { func newMetricSystemCPULoadAverage1m(settings MetricSettings) metricSystemCPULoadAverage1m { m := metricSystemCPULoadAverage1m{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricSystemCPULoadAverage5m struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -144,10 +145,10 @@ func (m *metricSystemCPULoadAverage5m) init() { m.data.SetName("system.cpu.load_average.5m") m.data.SetDescription("Average CPU Load over 5 minutes.") m.data.SetUnit("1") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) } -func (m *metricSystemCPULoadAverage5m) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64) { +func (m *metricSystemCPULoadAverage5m) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { if !m.settings.Enabled { return } @@ -165,7 +166,7 @@ func (m *metricSystemCPULoadAverage5m) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricSystemCPULoadAverage5m) emit(metrics pdata.MetricSlice) { +func (m *metricSystemCPULoadAverage5m) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -176,7 +177,7 @@ func (m *metricSystemCPULoadAverage5m) emit(metrics pdata.MetricSlice) { func newMetricSystemCPULoadAverage5m(settings MetricSettings) metricSystemCPULoadAverage5m { m := metricSystemCPULoadAverage5m{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m @@ -185,10 +186,10 @@ func newMetricSystemCPULoadAverage5m(settings MetricSettings) metricSystemCPULoa // MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations // required to produce metric representation defined in metadata and user settings. type MetricsBuilder struct { - startTime pdata.Timestamp // start time that will be applied to all recorded data points. - metricsCapacity int // maximum observed number of metrics per resource. 
- resourceCapacity int // maximum observed number of resource attributes. - metricsBuffer pdata.Metrics // accumulates metrics data before emitting. + startTime pcommon.Timestamp // start time that will be applied to all recorded data points. + metricsCapacity int // maximum observed number of metrics per resource. + resourceCapacity int // maximum observed number of resource attributes. + metricsBuffer pmetric.Metrics // accumulates metrics data before emitting. metricSystemCPULoadAverage15m metricSystemCPULoadAverage15m metricSystemCPULoadAverage1m metricSystemCPULoadAverage1m metricSystemCPULoadAverage5m metricSystemCPULoadAverage5m @@ -198,7 +199,7 @@ type MetricsBuilder struct { type metricBuilderOption func(*MetricsBuilder) // WithStartTime sets startTime on the metrics builder. -func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { +func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption { return func(mb *MetricsBuilder) { mb.startTime = startTime } @@ -206,8 +207,8 @@ func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ - startTime: pdata.NewTimestampFromTime(time.Now()), - metricsBuffer: pdata.NewMetrics(), + startTime: pcommon.NewTimestampFromTime(time.Now()), + metricsBuffer: pmetric.NewMetrics(), metricSystemCPULoadAverage15m: newMetricSystemCPULoadAverage15m(settings.SystemCPULoadAverage15m), metricSystemCPULoadAverage1m: newMetricSystemCPULoadAverage1m(settings.SystemCPULoadAverage1m), metricSystemCPULoadAverage5m: newMetricSystemCPULoadAverage5m(settings.SystemCPULoadAverage5m), @@ -219,7 +220,7 @@ func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) } // updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity. -func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { +func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) { if mb.metricsCapacity < rm.ScopeMetrics().At(0).Metrics().Len() { mb.metricsCapacity = rm.ScopeMetrics().At(0).Metrics().Len() } @@ -229,14 +230,14 @@ func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { } // ResourceOption applies changes to provided resource. -type ResourceOption func(pdata.Resource) +type ResourceOption func(pcommon.Resource) // EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for // recording another set of data points as part of another resource. This function can be helpful when one scraper // needs to emit metrics from several resources. Otherwise calling this function is not required, // just `Emit` function can be called instead. Resource attributes should be provided as ResourceOption arguments. func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { - rm := pdata.NewResourceMetrics() + rm := pmetric.NewResourceMetrics() rm.SetSchemaUrl(conventions.SchemaURL) rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity) for _, op := range ro { @@ -257,32 +258,32 @@ func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { // Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for // recording another set of metrics. This function will be responsible for applying all the transformations required to // produce metric representation defined in metadata and user settings, e.g. delta or cumulative. 
-func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pdata.Metrics { +func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pmetric.Metrics { mb.EmitForResource(ro...) - metrics := pdata.NewMetrics() + metrics := pmetric.NewMetrics() mb.metricsBuffer.MoveTo(metrics) return metrics } // RecordSystemCPULoadAverage15mDataPoint adds a data point to system.cpu.load_average.15m metric. -func (mb *MetricsBuilder) RecordSystemCPULoadAverage15mDataPoint(ts pdata.Timestamp, val float64) { +func (mb *MetricsBuilder) RecordSystemCPULoadAverage15mDataPoint(ts pcommon.Timestamp, val float64) { mb.metricSystemCPULoadAverage15m.recordDataPoint(mb.startTime, ts, val) } // RecordSystemCPULoadAverage1mDataPoint adds a data point to system.cpu.load_average.1m metric. -func (mb *MetricsBuilder) RecordSystemCPULoadAverage1mDataPoint(ts pdata.Timestamp, val float64) { +func (mb *MetricsBuilder) RecordSystemCPULoadAverage1mDataPoint(ts pcommon.Timestamp, val float64) { mb.metricSystemCPULoadAverage1m.recordDataPoint(mb.startTime, ts, val) } // RecordSystemCPULoadAverage5mDataPoint adds a data point to system.cpu.load_average.5m metric. -func (mb *MetricsBuilder) RecordSystemCPULoadAverage5mDataPoint(ts pdata.Timestamp, val float64) { +func (mb *MetricsBuilder) RecordSystemCPULoadAverage5mDataPoint(ts pcommon.Timestamp, val float64) { mb.metricSystemCPULoadAverage5m.recordDataPoint(mb.startTime, ts, val) } // Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted, // and metrics builder should update its startTime and reset it's internal state accordingly. func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { - mb.startTime = pdata.NewTimestampFromTime(time.Now()) + mb.startTime = pcommon.NewTimestampFromTime(time.Now()) for _, op := range options { op(mb) } diff --git a/receiver/hostmetricsreceiver/internal/scraper/loadscraper/load_scraper.go b/receiver/hostmetricsreceiver/internal/scraper/loadscraper/load_scraper.go index d73ffd1271c6..c4ec37b17ee7 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/loadscraper/load_scraper.go +++ b/receiver/hostmetricsreceiver/internal/scraper/loadscraper/load_scraper.go @@ -22,7 +22,8 @@ import ( "github.com/shirou/gopsutil/v3/host" "github.com/shirou/gopsutil/v3/load" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/receiver/scrapererror" "go.uber.org/zap" @@ -54,7 +55,7 @@ func (s *scraper) start(ctx context.Context, _ component.Host) error { return err } - s.mb = metadata.NewMetricsBuilder(s.config.Metrics, metadata.WithStartTime(pdata.Timestamp(bootTime*1e9))) + s.mb = metadata.NewMetricsBuilder(s.config.Metrics, metadata.WithStartTime(pcommon.Timestamp(bootTime*1e9))) return startSampling(ctx, s.logger) } @@ -64,11 +65,11 @@ func (s *scraper) shutdown(ctx context.Context) error { } // scrape -func (s *scraper) scrape(_ context.Context) (pdata.Metrics, error) { - now := pdata.NewTimestampFromTime(time.Now()) +func (s *scraper) scrape(_ context.Context) (pmetric.Metrics, error) { + now := pcommon.NewTimestampFromTime(time.Now()) avgLoadValues, err := s.load() if err != nil { - return pdata.NewMetrics(), scrapererror.NewPartialScrapeError(err, metricsLen) + return pmetric.NewMetrics(), scrapererror.NewPartialScrapeError(err, metricsLen) } if s.config.CPUAverage { diff --git 
a/receiver/hostmetricsreceiver/internal/scraper/loadscraper/load_scraper_test.go b/receiver/hostmetricsreceiver/internal/scraper/loadscraper/load_scraper_test.go index bca53d009e8d..d507645c3232 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/loadscraper/load_scraper_test.go +++ b/receiver/hostmetricsreceiver/internal/scraper/loadscraper/load_scraper_test.go @@ -24,7 +24,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component/componenttest" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/receiver/scrapererror" "go.uber.org/zap" @@ -74,7 +74,7 @@ func TestScrape(t *testing.T) { expectedErr: "err1", }, } - results := make(map[string]pdata.MetricSlice) + results := make(map[string]pmetric.MetricSlice) for _, test := range testCases { t.Run(test.name, func(t *testing.T) { @@ -134,12 +134,12 @@ func TestScrape(t *testing.T) { } } -func assertMetricHasSingleDatapoint(t *testing.T, metric pdata.Metric, expectedName string) { +func assertMetricHasSingleDatapoint(t *testing.T, metric pmetric.Metric, expectedName string) { assert.Equal(t, expectedName, metric.Name()) assert.Equal(t, 1, metric.Gauge().DataPoints().Len()) } -func assertCompareAveragePerCPU(t *testing.T, average pdata.Metric, standard pdata.Metric, numCPU int) { +func assertCompareAveragePerCPU(t *testing.T, average pmetric.Metric, standard pmetric.Metric, numCPU int) { valAverage := average.Gauge().DataPoints().At(0).DoubleVal() valStandard := standard.Gauge().DataPoints().At(0).DoubleVal() if numCPU == 1 { diff --git a/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/internal/metadata/generated_metrics_v2.go b/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/internal/metadata/generated_metrics_v2.go index 70631c51b68c..a8de364661a7 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/internal/metadata/generated_metrics_v2.go +++ b/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/internal/metadata/generated_metrics_v2.go @@ -5,8 +5,9 @@ package metadata import ( "time" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.9.0" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" ) // MetricSettings provides common settings for a particular metric. @@ -32,7 +33,7 @@ func DefaultMetricsSettings() MetricsSettings { } type metricSystemMemoryUsage struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -42,13 +43,13 @@ func (m *metricSystemMemoryUsage) init() { m.data.SetName("system.memory.usage") m.data.SetDescription("Bytes of memory in use.") m.data.SetUnit("By") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricSystemMemoryUsage) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, stateAttributeValue string) { +func (m *metricSystemMemoryUsage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, stateAttributeValue string) { if !m.settings.Enabled { return } @@ -56,7 +57,7 @@ func (m *metricSystemMemoryUsage) recordDataPoint(start pdata.Timestamp, ts pdat dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.State, pdata.NewValueString(stateAttributeValue)) + dp.Attributes().Insert(A.State, pcommon.NewValueString(stateAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -67,7 +68,7 @@ func (m *metricSystemMemoryUsage) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricSystemMemoryUsage) emit(metrics pdata.MetricSlice) { +func (m *metricSystemMemoryUsage) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -78,14 +79,14 @@ func (m *metricSystemMemoryUsage) emit(metrics pdata.MetricSlice) { func newMetricSystemMemoryUsage(settings MetricSettings) metricSystemMemoryUsage { m := metricSystemMemoryUsage{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricSystemMemoryUtilization struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -95,11 +96,11 @@ func (m *metricSystemMemoryUtilization) init() { m.data.SetName("system.memory.utilization") m.data.SetDescription("Percentage of memory bytes in use.") m.data.SetUnit("1") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricSystemMemoryUtilization) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, stateAttributeValue string) { +func (m *metricSystemMemoryUtilization) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, stateAttributeValue string) { if !m.settings.Enabled { return } @@ -107,7 +108,7 @@ func (m *metricSystemMemoryUtilization) recordDataPoint(start pdata.Timestamp, t dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.State, pdata.NewValueString(stateAttributeValue)) + dp.Attributes().Insert(A.State, pcommon.NewValueString(stateAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. 
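As an aside on the pattern these hunks illustrate, here is a minimal sketch (not part of this diff) of how a scraper drives the migrated metadata package after the pdata split: pcommon now supplies timestamps and attribute values, while pmetric supplies the Metrics payload. Only identifiers visible in the surrounding hunks are used; the wrapper name emitMemoryUsage is invented for illustration.

// Illustrative only; mirrors the builder API shown in the memoryscraper hunks above.
package memoryscraper

import (
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"

	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/internal/metadata"
)

// emitMemoryUsage records one system.memory.usage data point and returns the
// accumulated payload. Note the post-migration types: the timestamp is a
// pcommon.Timestamp and Emit returns pmetric.Metrics instead of pdata.Metrics.
func emitMemoryUsage(usedBytes int64) pmetric.Metrics {
	mb := metadata.NewMetricsBuilder(metadata.DefaultMetricsSettings())
	now := pcommon.NewTimestampFromTime(time.Now())
	mb.RecordSystemMemoryUsageDataPoint(now, usedBytes, metadata.AttributeState.Used)
	return mb.Emit()
}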
@@ -118,7 +119,7 @@ func (m *metricSystemMemoryUtilization) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricSystemMemoryUtilization) emit(metrics pdata.MetricSlice) { +func (m *metricSystemMemoryUtilization) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -129,7 +130,7 @@ func (m *metricSystemMemoryUtilization) emit(metrics pdata.MetricSlice) { func newMetricSystemMemoryUtilization(settings MetricSettings) metricSystemMemoryUtilization { m := metricSystemMemoryUtilization{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m @@ -138,10 +139,10 @@ func newMetricSystemMemoryUtilization(settings MetricSettings) metricSystemMemor // MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations // required to produce metric representation defined in metadata and user settings. type MetricsBuilder struct { - startTime pdata.Timestamp // start time that will be applied to all recorded data points. - metricsCapacity int // maximum observed number of metrics per resource. - resourceCapacity int // maximum observed number of resource attributes. - metricsBuffer pdata.Metrics // accumulates metrics data before emitting. + startTime pcommon.Timestamp // start time that will be applied to all recorded data points. + metricsCapacity int // maximum observed number of metrics per resource. + resourceCapacity int // maximum observed number of resource attributes. + metricsBuffer pmetric.Metrics // accumulates metrics data before emitting. metricSystemMemoryUsage metricSystemMemoryUsage metricSystemMemoryUtilization metricSystemMemoryUtilization } @@ -150,7 +151,7 @@ type MetricsBuilder struct { type metricBuilderOption func(*MetricsBuilder) // WithStartTime sets startTime on the metrics builder. -func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { +func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption { return func(mb *MetricsBuilder) { mb.startTime = startTime } @@ -158,8 +159,8 @@ func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ - startTime: pdata.NewTimestampFromTime(time.Now()), - metricsBuffer: pdata.NewMetrics(), + startTime: pcommon.NewTimestampFromTime(time.Now()), + metricsBuffer: pmetric.NewMetrics(), metricSystemMemoryUsage: newMetricSystemMemoryUsage(settings.SystemMemoryUsage), metricSystemMemoryUtilization: newMetricSystemMemoryUtilization(settings.SystemMemoryUtilization), } @@ -170,7 +171,7 @@ func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) } // updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity. -func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { +func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) { if mb.metricsCapacity < rm.ScopeMetrics().At(0).Metrics().Len() { mb.metricsCapacity = rm.ScopeMetrics().At(0).Metrics().Len() } @@ -180,14 +181,14 @@ func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { } // ResourceOption applies changes to provided resource. 
-type ResourceOption func(pdata.Resource) +type ResourceOption func(pcommon.Resource) // EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for // recording another set of data points as part of another resource. This function can be helpful when one scraper // needs to emit metrics from several resources. Otherwise calling this function is not required, // just `Emit` function can be called instead. Resource attributes should be provided as ResourceOption arguments. func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { - rm := pdata.NewResourceMetrics() + rm := pmetric.NewResourceMetrics() rm.SetSchemaUrl(conventions.SchemaURL) rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity) for _, op := range ro { @@ -207,27 +208,27 @@ func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { // Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for // recording another set of metrics. This function will be responsible for applying all the transformations required to // produce metric representation defined in metadata and user settings, e.g. delta or cumulative. -func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pdata.Metrics { +func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pmetric.Metrics { mb.EmitForResource(ro...) - metrics := pdata.NewMetrics() + metrics := pmetric.NewMetrics() mb.metricsBuffer.MoveTo(metrics) return metrics } // RecordSystemMemoryUsageDataPoint adds a data point to system.memory.usage metric. -func (mb *MetricsBuilder) RecordSystemMemoryUsageDataPoint(ts pdata.Timestamp, val int64, stateAttributeValue string) { +func (mb *MetricsBuilder) RecordSystemMemoryUsageDataPoint(ts pcommon.Timestamp, val int64, stateAttributeValue string) { mb.metricSystemMemoryUsage.recordDataPoint(mb.startTime, ts, val, stateAttributeValue) } // RecordSystemMemoryUtilizationDataPoint adds a data point to system.memory.utilization metric. -func (mb *MetricsBuilder) RecordSystemMemoryUtilizationDataPoint(ts pdata.Timestamp, val float64, stateAttributeValue string) { +func (mb *MetricsBuilder) RecordSystemMemoryUtilizationDataPoint(ts pcommon.Timestamp, val float64, stateAttributeValue string) { mb.metricSystemMemoryUtilization.recordDataPoint(mb.startTime, ts, val, stateAttributeValue) } // Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted, // and metrics builder should update its startTime and reset it's internal state accordingly. 
func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { - mb.startTime = pdata.NewTimestampFromTime(time.Now()) + mb.startTime = pcommon.NewTimestampFromTime(time.Now()) for _, op := range options { op(mb) } diff --git a/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper.go b/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper.go index 52edd5131105..0d0e3ef8dac1 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper.go +++ b/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper.go @@ -23,7 +23,8 @@ import ( "github.com/shirou/gopsutil/v3/host" "github.com/shirou/gopsutil/v3/mem" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/receiver/scrapererror" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/internal/metadata" @@ -54,21 +55,21 @@ func (s *scraper) start(context.Context, component.Host) error { return err } - s.mb = metadata.NewMetricsBuilder(s.config.Metrics, metadata.WithStartTime(pdata.Timestamp(bootTime*1e9))) + s.mb = metadata.NewMetricsBuilder(s.config.Metrics, metadata.WithStartTime(pcommon.Timestamp(bootTime*1e9))) return nil } -func (s *scraper) scrape(_ context.Context) (pdata.Metrics, error) { - now := pdata.NewTimestampFromTime(time.Now()) +func (s *scraper) scrape(_ context.Context) (pmetric.Metrics, error) { + now := pcommon.NewTimestampFromTime(time.Now()) memInfo, err := s.virtualMemory() if err != nil { - return pdata.NewMetrics(), scrapererror.NewPartialScrapeError(err, metricsLen) + return pmetric.NewMetrics(), scrapererror.NewPartialScrapeError(err, metricsLen) } if memInfo != nil { s.recordMemoryUsageMetric(now, memInfo) if memInfo.Total <= 0 { - return pdata.NewMetrics(), scrapererror.NewPartialScrapeError(fmt.Errorf("%w: %d", ErrInvalidTotalMem, + return pmetric.NewMetrics(), scrapererror.NewPartialScrapeError(fmt.Errorf("%w: %d", ErrInvalidTotalMem, memInfo.Total), metricsLen) } s.recordMemoryUtilizationMetric(now, memInfo) diff --git a/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_linux.go b/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_linux.go index 02b3c786d781..757936c9c5fb 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_linux.go +++ b/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_linux.go @@ -19,12 +19,12 @@ package memoryscraper // import "github.com/open-telemetry/opentelemetry-collect import ( "github.com/shirou/gopsutil/v3/mem" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/internal/metadata" ) -func (s *scraper) recordMemoryUsageMetric(now pdata.Timestamp, memInfo *mem.VirtualMemoryStat) { +func (s *scraper) recordMemoryUsageMetric(now pcommon.Timestamp, memInfo *mem.VirtualMemoryStat) { s.mb.RecordSystemMemoryUsageDataPoint(now, int64(memInfo.Used), metadata.AttributeState.Used) s.mb.RecordSystemMemoryUsageDataPoint(now, int64(memInfo.Free), metadata.AttributeState.Free) s.mb.RecordSystemMemoryUsageDataPoint(now, int64(memInfo.Buffers), metadata.AttributeState.Buffered) @@ 
-33,7 +33,7 @@ func (s *scraper) recordMemoryUsageMetric(now pdata.Timestamp, memInfo *mem.Virt s.mb.RecordSystemMemoryUsageDataPoint(now, int64(memInfo.Sunreclaim), metadata.AttributeState.SlabUnreclaimable) } -func (s *scraper) recordMemoryUtilizationMetric(now pdata.Timestamp, memInfo *mem.VirtualMemoryStat) { +func (s *scraper) recordMemoryUtilizationMetric(now pcommon.Timestamp, memInfo *mem.VirtualMemoryStat) { s.mb.RecordSystemMemoryUtilizationDataPoint(now, float64(memInfo.Used)/float64(memInfo.Total), metadata.AttributeState.Used) s.mb.RecordSystemMemoryUtilizationDataPoint(now, float64(memInfo.Free)/float64(memInfo.Total), metadata.AttributeState.Free) s.mb.RecordSystemMemoryUtilizationDataPoint(now, float64(memInfo.Buffers)/float64(memInfo.Total), metadata.AttributeState.Buffered) diff --git a/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_others.go b/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_others.go index 7a44001a07dc..e9ccb0b02ba3 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_others.go +++ b/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_others.go @@ -19,18 +19,18 @@ package memoryscraper // import "github.com/open-telemetry/opentelemetry-collect import ( "github.com/shirou/gopsutil/v3/mem" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/internal/metadata" ) -func (s *scraper) recordMemoryUsageMetric(now pdata.Timestamp, memInfo *mem.VirtualMemoryStat) { +func (s *scraper) recordMemoryUsageMetric(now pcommon.Timestamp, memInfo *mem.VirtualMemoryStat) { s.mb.RecordSystemMemoryUsageDataPoint(now, int64(memInfo.Used), metadata.AttributeState.Used) s.mb.RecordSystemMemoryUsageDataPoint(now, int64(memInfo.Free), metadata.AttributeState.Free) s.mb.RecordSystemMemoryUsageDataPoint(now, int64(memInfo.Inactive), metadata.AttributeState.Inactive) } -func (s *scraper) recordMemoryUtilizationMetric(now pdata.Timestamp, memInfo *mem.VirtualMemoryStat) { +func (s *scraper) recordMemoryUtilizationMetric(now pcommon.Timestamp, memInfo *mem.VirtualMemoryStat) { s.mb.RecordSystemMemoryUtilizationDataPoint(now, float64(memInfo.Used)/float64(memInfo.Total), metadata.AttributeState.Used) s.mb.RecordSystemMemoryUtilizationDataPoint(now, float64(memInfo.Free)/float64(memInfo.Total), metadata.AttributeState.Free) s.mb.RecordSystemMemoryUtilizationDataPoint(now, float64(memInfo.Inactive)/float64(memInfo.Total), metadata.AttributeState.Inactive) diff --git a/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_test.go b/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_test.go index d00511b7d760..4dbf38ac3a05 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_test.go +++ b/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_test.go @@ -24,7 +24,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component/componenttest" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/receiver/scrapererror" 
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal" @@ -122,7 +123,7 @@ func TestScrape(t *testing.T) { if runtime.GOOS == "linux" { assertMemoryUsageMetricHasLinuxSpecificStateLabels(t, metrics.At(0)) } else if runtime.GOOS != "windows" { - internal.AssertSumMetricHasAttributeValue(t, metrics.At(0), 2, metadata.Attributes.State, pdata.NewValueString(metadata.AttributeState.Inactive)) + internal.AssertSumMetricHasAttributeValue(t, metrics.At(0), 2, metadata.Attributes.State, pcommon.NewValueString(metadata.AttributeState.Inactive)) } internal.AssertSameTimeStampForAllMetrics(t, metrics) @@ -177,7 +178,7 @@ func TestScrape_MemoryUtilization(t *testing.T) { if runtime.GOOS == "linux" { assertMemoryUtilizationMetricHasLinuxSpecificStateLabels(t, metrics.At(0)) } else if runtime.GOOS != "windows" { - internal.AssertGaugeMetricHasAttributeValue(t, metrics.At(0), 2, metadata.Attributes.State, pdata.NewValueString(metadata.AttributeState.Inactive)) + internal.AssertGaugeMetricHasAttributeValue(t, metrics.At(0), 2, metadata.Attributes.State, pcommon.NewValueString(metadata.AttributeState.Inactive)) } internal.AssertSameTimeStampForAllMetrics(t, metrics) @@ -185,30 +186,30 @@ func TestScrape_MemoryUtilization(t *testing.T) { } } -func assertMemoryUsageMetricValid(t *testing.T, metric pdata.Metric, expectedName string) { +func assertMemoryUsageMetricValid(t *testing.T, metric pmetric.Metric, expectedName string) { assert.Equal(t, expectedName, metric.Name()) assert.GreaterOrEqual(t, metric.Sum().DataPoints().Len(), 2) - internal.AssertSumMetricHasAttributeValue(t, metric, 0, metadata.Attributes.State, pdata.NewValueString(metadata.AttributeState.Used)) - internal.AssertSumMetricHasAttributeValue(t, metric, 1, metadata.Attributes.State, pdata.NewValueString(metadata.AttributeState.Free)) + internal.AssertSumMetricHasAttributeValue(t, metric, 0, metadata.Attributes.State, pcommon.NewValueString(metadata.AttributeState.Used)) + internal.AssertSumMetricHasAttributeValue(t, metric, 1, metadata.Attributes.State, pcommon.NewValueString(metadata.AttributeState.Free)) } -func assertMemoryUtilizationMetricValid(t *testing.T, metric pdata.Metric, expectedName string) { +func assertMemoryUtilizationMetricValid(t *testing.T, metric pmetric.Metric, expectedName string) { assert.Equal(t, expectedName, metric.Name()) assert.GreaterOrEqual(t, metric.Gauge().DataPoints().Len(), 2) - internal.AssertGaugeMetricHasAttributeValue(t, metric, 0, metadata.Attributes.State, pdata.NewValueString(metadata.AttributeState.Used)) - internal.AssertGaugeMetricHasAttributeValue(t, metric, 1, metadata.Attributes.State, pdata.NewValueString(metadata.AttributeState.Free)) + internal.AssertGaugeMetricHasAttributeValue(t, metric, 0, metadata.Attributes.State, pcommon.NewValueString(metadata.AttributeState.Used)) + internal.AssertGaugeMetricHasAttributeValue(t, metric, 1, metadata.Attributes.State, pcommon.NewValueString(metadata.AttributeState.Free)) } -func assertMemoryUsageMetricHasLinuxSpecificStateLabels(t *testing.T, metric pdata.Metric) { - internal.AssertSumMetricHasAttributeValue(t, metric, 2, metadata.Attributes.State, pdata.NewValueString(metadata.AttributeState.Buffered)) - internal.AssertSumMetricHasAttributeValue(t, metric, 3, metadata.Attributes.State, pdata.NewValueString(metadata.AttributeState.Cached)) - internal.AssertSumMetricHasAttributeValue(t, metric, 4, metadata.Attributes.State, pdata.NewValueString(metadata.AttributeState.SlabReclaimable)) - 
internal.AssertSumMetricHasAttributeValue(t, metric, 5, metadata.Attributes.State, pdata.NewValueString(metadata.AttributeState.SlabUnreclaimable)) +func assertMemoryUsageMetricHasLinuxSpecificStateLabels(t *testing.T, metric pmetric.Metric) { + internal.AssertSumMetricHasAttributeValue(t, metric, 2, metadata.Attributes.State, pcommon.NewValueString(metadata.AttributeState.Buffered)) + internal.AssertSumMetricHasAttributeValue(t, metric, 3, metadata.Attributes.State, pcommon.NewValueString(metadata.AttributeState.Cached)) + internal.AssertSumMetricHasAttributeValue(t, metric, 4, metadata.Attributes.State, pcommon.NewValueString(metadata.AttributeState.SlabReclaimable)) + internal.AssertSumMetricHasAttributeValue(t, metric, 5, metadata.Attributes.State, pcommon.NewValueString(metadata.AttributeState.SlabUnreclaimable)) } -func assertMemoryUtilizationMetricHasLinuxSpecificStateLabels(t *testing.T, metric pdata.Metric) { - internal.AssertGaugeMetricHasAttributeValue(t, metric, 2, metadata.Attributes.State, pdata.NewValueString(metadata.AttributeState.Buffered)) - internal.AssertGaugeMetricHasAttributeValue(t, metric, 3, metadata.Attributes.State, pdata.NewValueString(metadata.AttributeState.Cached)) - internal.AssertGaugeMetricHasAttributeValue(t, metric, 4, metadata.Attributes.State, pdata.NewValueString(metadata.AttributeState.SlabReclaimable)) - internal.AssertGaugeMetricHasAttributeValue(t, metric, 5, metadata.Attributes.State, pdata.NewValueString(metadata.AttributeState.SlabUnreclaimable)) +func assertMemoryUtilizationMetricHasLinuxSpecificStateLabels(t *testing.T, metric pmetric.Metric) { + internal.AssertGaugeMetricHasAttributeValue(t, metric, 2, metadata.Attributes.State, pcommon.NewValueString(metadata.AttributeState.Buffered)) + internal.AssertGaugeMetricHasAttributeValue(t, metric, 3, metadata.Attributes.State, pcommon.NewValueString(metadata.AttributeState.Cached)) + internal.AssertGaugeMetricHasAttributeValue(t, metric, 4, metadata.Attributes.State, pcommon.NewValueString(metadata.AttributeState.SlabReclaimable)) + internal.AssertGaugeMetricHasAttributeValue(t, metric, 5, metadata.Attributes.State, pcommon.NewValueString(metadata.AttributeState.SlabUnreclaimable)) } diff --git a/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_windows.go b/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_windows.go index 10297ddfcac4..3579b6517906 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_windows.go +++ b/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/memory_scraper_windows.go @@ -19,17 +19,17 @@ package memoryscraper // import "github.com/open-telemetry/opentelemetry-collect import ( "github.com/shirou/gopsutil/v3/mem" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/memoryscraper/internal/metadata" ) -func (s *scraper) recordMemoryUsageMetric(now pdata.Timestamp, memInfo *mem.VirtualMemoryStat) { +func (s *scraper) recordMemoryUsageMetric(now pcommon.Timestamp, memInfo *mem.VirtualMemoryStat) { s.mb.RecordSystemMemoryUsageDataPoint(now, int64(memInfo.Used), metadata.AttributeState.Used) s.mb.RecordSystemMemoryUsageDataPoint(now, int64(memInfo.Free), metadata.AttributeState.Free) } -func (s *scraper) recordMemoryUtilizationMetric(now pdata.Timestamp, memInfo *mem.VirtualMemoryStat) { +func (s *scraper) 
recordMemoryUtilizationMetric(now pcommon.Timestamp, memInfo *mem.VirtualMemoryStat) { s.mb.RecordSystemMemoryUtilizationDataPoint(now, float64(memInfo.Used)/float64(memInfo.Total), metadata.AttributeState.Used) s.mb.RecordSystemMemoryUtilizationDataPoint(now, float64(memInfo.Free)/float64(memInfo.Total), metadata.AttributeState.Free) } diff --git a/receiver/hostmetricsreceiver/internal/scraper/networkscraper/internal/metadata/generated_metrics_v2.go b/receiver/hostmetricsreceiver/internal/scraper/networkscraper/internal/metadata/generated_metrics_v2.go index 1692273ad068..0c86958c213e 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/networkscraper/internal/metadata/generated_metrics_v2.go +++ b/receiver/hostmetricsreceiver/internal/scraper/networkscraper/internal/metadata/generated_metrics_v2.go @@ -5,8 +5,9 @@ package metadata import ( "time" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.9.0" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" ) // MetricSettings provides common settings for a particular metric. @@ -44,7 +45,7 @@ func DefaultMetricsSettings() MetricsSettings { } type metricSystemNetworkConnections struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -54,13 +55,13 @@ func (m *metricSystemNetworkConnections) init() { m.data.SetName("system.network.connections") m.data.SetDescription("The number of connections.") m.data.SetUnit("{connections}") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricSystemNetworkConnections) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, protocolAttributeValue string, stateAttributeValue string) { +func (m *metricSystemNetworkConnections) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, protocolAttributeValue string, stateAttributeValue string) { if !m.settings.Enabled { return } @@ -68,8 +69,8 @@ func (m *metricSystemNetworkConnections) recordDataPoint(start pdata.Timestamp, dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.Protocol, pdata.NewValueString(protocolAttributeValue)) - dp.Attributes().Insert(A.State, pdata.NewValueString(stateAttributeValue)) + dp.Attributes().Insert(A.Protocol, pcommon.NewValueString(protocolAttributeValue)) + dp.Attributes().Insert(A.State, pcommon.NewValueString(stateAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -80,7 +81,7 @@ func (m *metricSystemNetworkConnections) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricSystemNetworkConnections) emit(metrics pdata.MetricSlice) { +func (m *metricSystemNetworkConnections) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -91,14 +92,14 @@ func (m *metricSystemNetworkConnections) emit(metrics pdata.MetricSlice) { func newMetricSystemNetworkConnections(settings MetricSettings) metricSystemNetworkConnections { m := metricSystemNetworkConnections{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricSystemNetworkDropped struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -108,13 +109,13 @@ func (m *metricSystemNetworkDropped) init() { m.data.SetName("system.network.dropped") m.data.SetDescription("The number of packets dropped.") m.data.SetUnit("{packets}") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricSystemNetworkDropped) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, deviceAttributeValue string, directionAttributeValue string) { +func (m *metricSystemNetworkDropped) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceAttributeValue string, directionAttributeValue string) { if !m.settings.Enabled { return } @@ -122,8 +123,8 @@ func (m *metricSystemNetworkDropped) recordDataPoint(start pdata.Timestamp, ts p dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.Device, pdata.NewValueString(deviceAttributeValue)) - dp.Attributes().Insert(A.Direction, pdata.NewValueString(directionAttributeValue)) + dp.Attributes().Insert(A.Device, pcommon.NewValueString(deviceAttributeValue)) + dp.Attributes().Insert(A.Direction, pcommon.NewValueString(directionAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -134,7 +135,7 @@ func (m *metricSystemNetworkDropped) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricSystemNetworkDropped) emit(metrics pdata.MetricSlice) { +func (m *metricSystemNetworkDropped) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -145,14 +146,14 @@ func (m *metricSystemNetworkDropped) emit(metrics pdata.MetricSlice) { func newMetricSystemNetworkDropped(settings MetricSettings) metricSystemNetworkDropped { m := metricSystemNetworkDropped{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricSystemNetworkErrors struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -162,13 +163,13 @@ func (m *metricSystemNetworkErrors) init() { m.data.SetName("system.network.errors") m.data.SetDescription("The number of errors encountered.") m.data.SetUnit("{errors}") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricSystemNetworkErrors) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, deviceAttributeValue string, directionAttributeValue string) { +func (m *metricSystemNetworkErrors) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceAttributeValue string, directionAttributeValue string) { if !m.settings.Enabled { return } @@ -176,8 +177,8 @@ func (m *metricSystemNetworkErrors) recordDataPoint(start pdata.Timestamp, ts pd dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.Device, pdata.NewValueString(deviceAttributeValue)) - dp.Attributes().Insert(A.Direction, pdata.NewValueString(directionAttributeValue)) + dp.Attributes().Insert(A.Device, pcommon.NewValueString(deviceAttributeValue)) + dp.Attributes().Insert(A.Direction, pcommon.NewValueString(directionAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -188,7 +189,7 @@ func (m *metricSystemNetworkErrors) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricSystemNetworkErrors) emit(metrics pdata.MetricSlice) { +func (m *metricSystemNetworkErrors) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -199,14 +200,14 @@ func (m *metricSystemNetworkErrors) emit(metrics pdata.MetricSlice) { func newMetricSystemNetworkErrors(settings MetricSettings) metricSystemNetworkErrors { m := metricSystemNetworkErrors{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricSystemNetworkIo struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -216,13 +217,13 @@ func (m *metricSystemNetworkIo) init() { m.data.SetName("system.network.io") m.data.SetDescription("The number of bytes transmitted and received.") m.data.SetUnit("By") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricSystemNetworkIo) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, deviceAttributeValue string, directionAttributeValue string) { +func (m *metricSystemNetworkIo) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceAttributeValue string, directionAttributeValue string) { if !m.settings.Enabled { return } @@ -230,8 +231,8 @@ func (m *metricSystemNetworkIo) recordDataPoint(start pdata.Timestamp, ts pdata. dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.Device, pdata.NewValueString(deviceAttributeValue)) - dp.Attributes().Insert(A.Direction, pdata.NewValueString(directionAttributeValue)) + dp.Attributes().Insert(A.Device, pcommon.NewValueString(deviceAttributeValue)) + dp.Attributes().Insert(A.Direction, pcommon.NewValueString(directionAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -242,7 +243,7 @@ func (m *metricSystemNetworkIo) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricSystemNetworkIo) emit(metrics pdata.MetricSlice) { +func (m *metricSystemNetworkIo) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -253,14 +254,14 @@ func (m *metricSystemNetworkIo) emit(metrics pdata.MetricSlice) { func newMetricSystemNetworkIo(settings MetricSettings) metricSystemNetworkIo { m := metricSystemNetworkIo{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricSystemNetworkPackets struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -270,13 +271,13 @@ func (m *metricSystemNetworkPackets) init() { m.data.SetName("system.network.packets") m.data.SetDescription("The number of packets transferred.") m.data.SetUnit("{packets}") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricSystemNetworkPackets) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, deviceAttributeValue string, directionAttributeValue string) { +func (m *metricSystemNetworkPackets) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceAttributeValue string, directionAttributeValue string) { if !m.settings.Enabled { return } @@ -284,8 +285,8 @@ func (m *metricSystemNetworkPackets) recordDataPoint(start pdata.Timestamp, ts p dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.Device, pdata.NewValueString(deviceAttributeValue)) - dp.Attributes().Insert(A.Direction, pdata.NewValueString(directionAttributeValue)) + dp.Attributes().Insert(A.Device, pcommon.NewValueString(deviceAttributeValue)) + dp.Attributes().Insert(A.Direction, pcommon.NewValueString(directionAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -296,7 +297,7 @@ func (m *metricSystemNetworkPackets) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricSystemNetworkPackets) emit(metrics pdata.MetricSlice) { +func (m *metricSystemNetworkPackets) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -307,7 +308,7 @@ func (m *metricSystemNetworkPackets) emit(metrics pdata.MetricSlice) { func newMetricSystemNetworkPackets(settings MetricSettings) metricSystemNetworkPackets { m := metricSystemNetworkPackets{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m @@ -316,10 +317,10 @@ func newMetricSystemNetworkPackets(settings MetricSettings) metricSystemNetworkP // MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations // required to produce metric representation defined in metadata and user settings. type MetricsBuilder struct { - startTime pdata.Timestamp // start time that will be applied to all recorded data points. - metricsCapacity int // maximum observed number of metrics per resource. - resourceCapacity int // maximum observed number of resource attributes. - metricsBuffer pdata.Metrics // accumulates metrics data before emitting. + startTime pcommon.Timestamp // start time that will be applied to all recorded data points. + metricsCapacity int // maximum observed number of metrics per resource. + resourceCapacity int // maximum observed number of resource attributes. + metricsBuffer pmetric.Metrics // accumulates metrics data before emitting. 
metricSystemNetworkConnections metricSystemNetworkConnections metricSystemNetworkDropped metricSystemNetworkDropped metricSystemNetworkErrors metricSystemNetworkErrors @@ -331,7 +332,7 @@ type MetricsBuilder struct { type metricBuilderOption func(*MetricsBuilder) // WithStartTime sets startTime on the metrics builder. -func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { +func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption { return func(mb *MetricsBuilder) { mb.startTime = startTime } @@ -339,8 +340,8 @@ func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ - startTime: pdata.NewTimestampFromTime(time.Now()), - metricsBuffer: pdata.NewMetrics(), + startTime: pcommon.NewTimestampFromTime(time.Now()), + metricsBuffer: pmetric.NewMetrics(), metricSystemNetworkConnections: newMetricSystemNetworkConnections(settings.SystemNetworkConnections), metricSystemNetworkDropped: newMetricSystemNetworkDropped(settings.SystemNetworkDropped), metricSystemNetworkErrors: newMetricSystemNetworkErrors(settings.SystemNetworkErrors), @@ -354,7 +355,7 @@ func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) } // updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity. -func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { +func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) { if mb.metricsCapacity < rm.ScopeMetrics().At(0).Metrics().Len() { mb.metricsCapacity = rm.ScopeMetrics().At(0).Metrics().Len() } @@ -364,14 +365,14 @@ func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { } // ResourceOption applies changes to provided resource. -type ResourceOption func(pdata.Resource) +type ResourceOption func(pcommon.Resource) // EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for // recording another set of data points as part of another resource. This function can be helpful when one scraper // needs to emit metrics from several resources. Otherwise calling this function is not required, // just `Emit` function can be called instead. Resource attributes should be provided as ResourceOption arguments. func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { - rm := pdata.NewResourceMetrics() + rm := pmetric.NewResourceMetrics() rm.SetSchemaUrl(conventions.SchemaURL) rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity) for _, op := range ro { @@ -394,42 +395,42 @@ func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { // Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for // recording another set of metrics. This function will be responsible for applying all the transformations required to // produce metric representation defined in metadata and user settings, e.g. delta or cumulative. -func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pdata.Metrics { +func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pmetric.Metrics { mb.EmitForResource(ro...) - metrics := pdata.NewMetrics() + metrics := pmetric.NewMetrics() mb.metricsBuffer.MoveTo(metrics) return metrics } // RecordSystemNetworkConnectionsDataPoint adds a data point to system.network.connections metric. 
-func (mb *MetricsBuilder) RecordSystemNetworkConnectionsDataPoint(ts pdata.Timestamp, val int64, protocolAttributeValue string, stateAttributeValue string) { +func (mb *MetricsBuilder) RecordSystemNetworkConnectionsDataPoint(ts pcommon.Timestamp, val int64, protocolAttributeValue string, stateAttributeValue string) { mb.metricSystemNetworkConnections.recordDataPoint(mb.startTime, ts, val, protocolAttributeValue, stateAttributeValue) } // RecordSystemNetworkDroppedDataPoint adds a data point to system.network.dropped metric. -func (mb *MetricsBuilder) RecordSystemNetworkDroppedDataPoint(ts pdata.Timestamp, val int64, deviceAttributeValue string, directionAttributeValue string) { +func (mb *MetricsBuilder) RecordSystemNetworkDroppedDataPoint(ts pcommon.Timestamp, val int64, deviceAttributeValue string, directionAttributeValue string) { mb.metricSystemNetworkDropped.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue, directionAttributeValue) } // RecordSystemNetworkErrorsDataPoint adds a data point to system.network.errors metric. -func (mb *MetricsBuilder) RecordSystemNetworkErrorsDataPoint(ts pdata.Timestamp, val int64, deviceAttributeValue string, directionAttributeValue string) { +func (mb *MetricsBuilder) RecordSystemNetworkErrorsDataPoint(ts pcommon.Timestamp, val int64, deviceAttributeValue string, directionAttributeValue string) { mb.metricSystemNetworkErrors.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue, directionAttributeValue) } // RecordSystemNetworkIoDataPoint adds a data point to system.network.io metric. -func (mb *MetricsBuilder) RecordSystemNetworkIoDataPoint(ts pdata.Timestamp, val int64, deviceAttributeValue string, directionAttributeValue string) { +func (mb *MetricsBuilder) RecordSystemNetworkIoDataPoint(ts pcommon.Timestamp, val int64, deviceAttributeValue string, directionAttributeValue string) { mb.metricSystemNetworkIo.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue, directionAttributeValue) } // RecordSystemNetworkPacketsDataPoint adds a data point to system.network.packets metric. -func (mb *MetricsBuilder) RecordSystemNetworkPacketsDataPoint(ts pdata.Timestamp, val int64, deviceAttributeValue string, directionAttributeValue string) { +func (mb *MetricsBuilder) RecordSystemNetworkPacketsDataPoint(ts pcommon.Timestamp, val int64, deviceAttributeValue string, directionAttributeValue string) { mb.metricSystemNetworkPackets.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue, directionAttributeValue) } // Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted, // and metrics builder should update its startTime and reset it's internal state accordingly. 
func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { - mb.startTime = pdata.NewTimestampFromTime(time.Now()) + mb.startTime = pcommon.NewTimestampFromTime(time.Now()) for _, op := range options { op(mb) } diff --git a/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper.go b/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper.go index b5701ce62e7e..9a0b3084f162 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper.go +++ b/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper.go @@ -22,7 +22,8 @@ import ( "github.com/shirou/gopsutil/v3/host" "github.com/shirou/gopsutil/v3/net" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/receiver/scrapererror" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/processor/filterset" @@ -38,7 +39,7 @@ const ( type scraper struct { config *Config mb *metadata.MetricsBuilder - startTime pdata.Timestamp + startTime pcommon.Timestamp includeFS filterset.FilterSet excludeFS filterset.FilterSet @@ -77,12 +78,12 @@ func (s *scraper) start(context.Context, component.Host) error { return err } - s.startTime = pdata.Timestamp(bootTime * 1e9) - s.mb = metadata.NewMetricsBuilder(s.config.Metrics, metadata.WithStartTime(pdata.Timestamp(bootTime*1e9))) + s.startTime = pcommon.Timestamp(bootTime * 1e9) + s.mb = metadata.NewMetricsBuilder(s.config.Metrics, metadata.WithStartTime(pcommon.Timestamp(bootTime*1e9))) return nil } -func (s *scraper) scrape(_ context.Context) (pdata.Metrics, error) { +func (s *scraper) scrape(_ context.Context) (pmetric.Metrics, error) { var errors scrapererror.ScrapeErrors err := s.recordNetworkCounterMetrics() @@ -99,7 +100,7 @@ func (s *scraper) scrape(_ context.Context) (pdata.Metrics, error) { } func (s *scraper) recordNetworkCounterMetrics() error { - now := pdata.NewTimestampFromTime(time.Now()) + now := pcommon.NewTimestampFromTime(time.Now()) // get total stats only ioCounters, err := s.ioCounters( /*perNetworkInterfaceController=*/ true) @@ -120,28 +121,28 @@ func (s *scraper) recordNetworkCounterMetrics() error { return nil } -func (s *scraper) recordNetworkPacketsMetric(now pdata.Timestamp, ioCountersSlice []net.IOCountersStat) { +func (s *scraper) recordNetworkPacketsMetric(now pcommon.Timestamp, ioCountersSlice []net.IOCountersStat) { for _, ioCounters := range ioCountersSlice { s.mb.RecordSystemNetworkPacketsDataPoint(now, int64(ioCounters.PacketsSent), ioCounters.Name, metadata.AttributeDirection.Transmit) s.mb.RecordSystemNetworkPacketsDataPoint(now, int64(ioCounters.PacketsRecv), ioCounters.Name, metadata.AttributeDirection.Receive) } } -func (s *scraper) recordNetworkDroppedPacketsMetric(now pdata.Timestamp, ioCountersSlice []net.IOCountersStat) { +func (s *scraper) recordNetworkDroppedPacketsMetric(now pcommon.Timestamp, ioCountersSlice []net.IOCountersStat) { for _, ioCounters := range ioCountersSlice { s.mb.RecordSystemNetworkDroppedDataPoint(now, int64(ioCounters.Dropout), ioCounters.Name, metadata.AttributeDirection.Transmit) s.mb.RecordSystemNetworkDroppedDataPoint(now, int64(ioCounters.Dropin), ioCounters.Name, metadata.AttributeDirection.Receive) } } -func (s *scraper) recordNetworkErrorPacketsMetric(now pdata.Timestamp, ioCountersSlice []net.IOCountersStat) { +func (s *scraper) 
recordNetworkErrorPacketsMetric(now pcommon.Timestamp, ioCountersSlice []net.IOCountersStat) { for _, ioCounters := range ioCountersSlice { s.mb.RecordSystemNetworkErrorsDataPoint(now, int64(ioCounters.Errout), ioCounters.Name, metadata.AttributeDirection.Transmit) s.mb.RecordSystemNetworkErrorsDataPoint(now, int64(ioCounters.Errin), ioCounters.Name, metadata.AttributeDirection.Receive) } } -func (s *scraper) recordNetworkIOMetric(now pdata.Timestamp, ioCountersSlice []net.IOCountersStat) { +func (s *scraper) recordNetworkIOMetric(now pcommon.Timestamp, ioCountersSlice []net.IOCountersStat) { for _, ioCounters := range ioCountersSlice { s.mb.RecordSystemNetworkIoDataPoint(now, int64(ioCounters.BytesSent), ioCounters.Name, metadata.AttributeDirection.Transmit) s.mb.RecordSystemNetworkIoDataPoint(now, int64(ioCounters.BytesRecv), ioCounters.Name, metadata.AttributeDirection.Receive) @@ -149,7 +150,7 @@ func (s *scraper) recordNetworkIOMetric(now pdata.Timestamp, ioCountersSlice []n } func (s *scraper) recordNetworkConnectionsMetrics() error { - now := pdata.NewTimestampFromTime(time.Now()) + now := pcommon.NewTimestampFromTime(time.Now()) connections, err := s.connections("tcp") if err != nil { @@ -174,7 +175,7 @@ func getTCPConnectionStatusCounts(connections []net.ConnectionStat) map[string]i return tcpStatuses } -func (s *scraper) recordNetworkConnectionsMetric(now pdata.Timestamp, connectionStateCounts map[string]int64) { +func (s *scraper) recordNetworkConnectionsMetric(now pcommon.Timestamp, connectionStateCounts map[string]int64) { for connectionState, count := range connectionStateCounts { s.mb.RecordSystemNetworkConnectionsDataPoint(now, count, metadata.AttributeProtocol.Tcp, connectionState) } diff --git a/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper_test.go b/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper_test.go index f05e418a8840..9b57f62d7f8a 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper_test.go +++ b/receiver/hostmetricsreceiver/internal/scraper/networkscraper/network_scraper_test.go @@ -23,7 +23,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component/componenttest" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/receiver/scrapererror" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/processor/filterset" @@ -39,7 +40,7 @@ func TestScrape(t *testing.T) { ioCountersFunc func(bool) ([]net.IOCountersStat, error) connectionsFunc func(string) ([]net.ConnectionStat, error) expectNetworkMetrics bool - expectedStartTime pdata.Timestamp + expectedStartTime pcommon.Timestamp newErrRegex string initializationErr string expectedErr string @@ -170,20 +171,20 @@ func TestScrape(t *testing.T) { } } -func assertNetworkIOMetricValid(t *testing.T, metric pdata.Metric, expectedName string, startTime pdata.Timestamp) { +func assertNetworkIOMetricValid(t *testing.T, metric pmetric.Metric, expectedName string, startTime pcommon.Timestamp) { assert.Equal(t, expectedName, metric.Name()) if startTime != 0 { internal.AssertSumMetricStartTimeEquals(t, metric, startTime) } assert.GreaterOrEqual(t, metric.Sum().DataPoints().Len(), 2) internal.AssertSumMetricHasAttribute(t, metric, 0, "device") - internal.AssertSumMetricHasAttributeValue(t, metric, 0, 
"direction", pdata.NewValueString(metadata.AttributeDirection.Transmit)) - internal.AssertSumMetricHasAttributeValue(t, metric, 1, "direction", pdata.NewValueString(metadata.AttributeDirection.Receive)) + internal.AssertSumMetricHasAttributeValue(t, metric, 0, "direction", pcommon.NewValueString(metadata.AttributeDirection.Transmit)) + internal.AssertSumMetricHasAttributeValue(t, metric, 1, "direction", pcommon.NewValueString(metadata.AttributeDirection.Receive)) } -func assertNetworkConnectionsMetricValid(t *testing.T, metric pdata.Metric) { +func assertNetworkConnectionsMetricValid(t *testing.T, metric pmetric.Metric) { assert.Equal(t, metric.Name(), "system.network.connections") - internal.AssertSumMetricHasAttributeValue(t, metric, 0, "protocol", pdata.NewValueString(metadata.AttributeProtocol.Tcp)) + internal.AssertSumMetricHasAttributeValue(t, metric, 0, "protocol", pcommon.NewValueString(metadata.AttributeProtocol.Tcp)) internal.AssertSumMetricHasAttribute(t, metric, 0, "state") assert.Equal(t, 12, metric.Sum().DataPoints().Len()) } diff --git a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/internal/metadata/generated_metrics_v2.go b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/internal/metadata/generated_metrics_v2.go index 0cc1be7c0efc..cdb1bc218c91 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/internal/metadata/generated_metrics_v2.go +++ b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/internal/metadata/generated_metrics_v2.go @@ -5,8 +5,9 @@ package metadata import ( "time" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.9.0" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" ) // MetricSettings provides common settings for a particular metric. @@ -40,7 +41,7 @@ func DefaultMetricsSettings() MetricsSettings { } type metricSystemPagingFaults struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -50,13 +51,13 @@ func (m *metricSystemPagingFaults) init() { m.data.SetName("system.paging.faults") m.data.SetDescription("The number of page faults.") m.data.SetUnit("{faults}") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricSystemPagingFaults) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, typeAttributeValue string) { +func (m *metricSystemPagingFaults) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, typeAttributeValue string) { if !m.settings.Enabled { return } @@ -64,7 +65,7 @@ func (m *metricSystemPagingFaults) recordDataPoint(start pdata.Timestamp, ts pda dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.Type, pdata.NewValueString(typeAttributeValue)) + dp.Attributes().Insert(A.Type, pcommon.NewValueString(typeAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. 
@@ -75,7 +76,7 @@ func (m *metricSystemPagingFaults) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricSystemPagingFaults) emit(metrics pdata.MetricSlice) { +func (m *metricSystemPagingFaults) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -86,14 +87,14 @@ func (m *metricSystemPagingFaults) emit(metrics pdata.MetricSlice) { func newMetricSystemPagingFaults(settings MetricSettings) metricSystemPagingFaults { m := metricSystemPagingFaults{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricSystemPagingOperations struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -103,13 +104,13 @@ func (m *metricSystemPagingOperations) init() { m.data.SetName("system.paging.operations") m.data.SetDescription("The number of paging operations.") m.data.SetUnit("{operations}") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricSystemPagingOperations) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, directionAttributeValue string, typeAttributeValue string) { +func (m *metricSystemPagingOperations) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, directionAttributeValue string, typeAttributeValue string) { if !m.settings.Enabled { return } @@ -117,8 +118,8 @@ func (m *metricSystemPagingOperations) recordDataPoint(start pdata.Timestamp, ts dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.Direction, pdata.NewValueString(directionAttributeValue)) - dp.Attributes().Insert(A.Type, pdata.NewValueString(typeAttributeValue)) + dp.Attributes().Insert(A.Direction, pcommon.NewValueString(directionAttributeValue)) + dp.Attributes().Insert(A.Type, pcommon.NewValueString(typeAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -129,7 +130,7 @@ func (m *metricSystemPagingOperations) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricSystemPagingOperations) emit(metrics pdata.MetricSlice) { +func (m *metricSystemPagingOperations) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -140,14 +141,14 @@ func (m *metricSystemPagingOperations) emit(metrics pdata.MetricSlice) { func newMetricSystemPagingOperations(settings MetricSettings) metricSystemPagingOperations { m := metricSystemPagingOperations{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricSystemPagingUsage struct { - data pdata.Metric // data buffer for generated metric. 
+ data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -157,13 +158,13 @@ func (m *metricSystemPagingUsage) init() { m.data.SetName("system.paging.usage") m.data.SetDescription("Swap (unix) or pagefile (windows) usage.") m.data.SetUnit("By") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricSystemPagingUsage) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, deviceAttributeValue string, stateAttributeValue string) { +func (m *metricSystemPagingUsage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, deviceAttributeValue string, stateAttributeValue string) { if !m.settings.Enabled { return } @@ -171,8 +172,8 @@ func (m *metricSystemPagingUsage) recordDataPoint(start pdata.Timestamp, ts pdat dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.Device, pdata.NewValueString(deviceAttributeValue)) - dp.Attributes().Insert(A.State, pdata.NewValueString(stateAttributeValue)) + dp.Attributes().Insert(A.Device, pcommon.NewValueString(deviceAttributeValue)) + dp.Attributes().Insert(A.State, pcommon.NewValueString(stateAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -183,7 +184,7 @@ func (m *metricSystemPagingUsage) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricSystemPagingUsage) emit(metrics pdata.MetricSlice) { +func (m *metricSystemPagingUsage) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -194,14 +195,14 @@ func (m *metricSystemPagingUsage) emit(metrics pdata.MetricSlice) { func newMetricSystemPagingUsage(settings MetricSettings) metricSystemPagingUsage { m := metricSystemPagingUsage{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricSystemPagingUtilization struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -211,11 +212,11 @@ func (m *metricSystemPagingUtilization) init() { m.data.SetName("system.paging.utilization") m.data.SetDescription("Swap (unix) or pagefile (windows) utilization.") m.data.SetUnit("1") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricSystemPagingUtilization) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, deviceAttributeValue string, stateAttributeValue string) { +func (m *metricSystemPagingUtilization) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, deviceAttributeValue string, stateAttributeValue string) { if !m.settings.Enabled { return } @@ -223,8 +224,8 @@ func (m *metricSystemPagingUtilization) recordDataPoint(start pdata.Timestamp, t dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.Device, pdata.NewValueString(deviceAttributeValue)) - dp.Attributes().Insert(A.State, pdata.NewValueString(stateAttributeValue)) + dp.Attributes().Insert(A.Device, pcommon.NewValueString(deviceAttributeValue)) + dp.Attributes().Insert(A.State, pcommon.NewValueString(stateAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -235,7 +236,7 @@ func (m *metricSystemPagingUtilization) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricSystemPagingUtilization) emit(metrics pdata.MetricSlice) { +func (m *metricSystemPagingUtilization) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -246,7 +247,7 @@ func (m *metricSystemPagingUtilization) emit(metrics pdata.MetricSlice) { func newMetricSystemPagingUtilization(settings MetricSettings) metricSystemPagingUtilization { m := metricSystemPagingUtilization{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m @@ -255,10 +256,10 @@ func newMetricSystemPagingUtilization(settings MetricSettings) metricSystemPagin // MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations // required to produce metric representation defined in metadata and user settings. type MetricsBuilder struct { - startTime pdata.Timestamp // start time that will be applied to all recorded data points. - metricsCapacity int // maximum observed number of metrics per resource. - resourceCapacity int // maximum observed number of resource attributes. - metricsBuffer pdata.Metrics // accumulates metrics data before emitting. + startTime pcommon.Timestamp // start time that will be applied to all recorded data points. + metricsCapacity int // maximum observed number of metrics per resource. + resourceCapacity int // maximum observed number of resource attributes. + metricsBuffer pmetric.Metrics // accumulates metrics data before emitting. metricSystemPagingFaults metricSystemPagingFaults metricSystemPagingOperations metricSystemPagingOperations metricSystemPagingUsage metricSystemPagingUsage @@ -269,7 +270,7 @@ type MetricsBuilder struct { type metricBuilderOption func(*MetricsBuilder) // WithStartTime sets startTime on the metrics builder. 
-func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { +func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption { return func(mb *MetricsBuilder) { mb.startTime = startTime } @@ -277,8 +278,8 @@ func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ - startTime: pdata.NewTimestampFromTime(time.Now()), - metricsBuffer: pdata.NewMetrics(), + startTime: pcommon.NewTimestampFromTime(time.Now()), + metricsBuffer: pmetric.NewMetrics(), metricSystemPagingFaults: newMetricSystemPagingFaults(settings.SystemPagingFaults), metricSystemPagingOperations: newMetricSystemPagingOperations(settings.SystemPagingOperations), metricSystemPagingUsage: newMetricSystemPagingUsage(settings.SystemPagingUsage), @@ -291,7 +292,7 @@ func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) } // updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity. -func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { +func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) { if mb.metricsCapacity < rm.ScopeMetrics().At(0).Metrics().Len() { mb.metricsCapacity = rm.ScopeMetrics().At(0).Metrics().Len() } @@ -301,14 +302,14 @@ func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { } // ResourceOption applies changes to provided resource. -type ResourceOption func(pdata.Resource) +type ResourceOption func(pcommon.Resource) // EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for // recording another set of data points as part of another resource. This function can be helpful when one scraper // needs to emit metrics from several resources. Otherwise calling this function is not required, // just `Emit` function can be called instead. Resource attributes should be provided as ResourceOption arguments. func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { - rm := pdata.NewResourceMetrics() + rm := pmetric.NewResourceMetrics() rm.SetSchemaUrl(conventions.SchemaURL) rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity) for _, op := range ro { @@ -330,37 +331,37 @@ func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { // Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for // recording another set of metrics. This function will be responsible for applying all the transformations required to // produce metric representation defined in metadata and user settings, e.g. delta or cumulative. -func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pdata.Metrics { +func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pmetric.Metrics { mb.EmitForResource(ro...) - metrics := pdata.NewMetrics() + metrics := pmetric.NewMetrics() mb.metricsBuffer.MoveTo(metrics) return metrics } // RecordSystemPagingFaultsDataPoint adds a data point to system.paging.faults metric. -func (mb *MetricsBuilder) RecordSystemPagingFaultsDataPoint(ts pdata.Timestamp, val int64, typeAttributeValue string) { +func (mb *MetricsBuilder) RecordSystemPagingFaultsDataPoint(ts pcommon.Timestamp, val int64, typeAttributeValue string) { mb.metricSystemPagingFaults.recordDataPoint(mb.startTime, ts, val, typeAttributeValue) } // RecordSystemPagingOperationsDataPoint adds a data point to system.paging.operations metric. 
-func (mb *MetricsBuilder) RecordSystemPagingOperationsDataPoint(ts pdata.Timestamp, val int64, directionAttributeValue string, typeAttributeValue string) { +func (mb *MetricsBuilder) RecordSystemPagingOperationsDataPoint(ts pcommon.Timestamp, val int64, directionAttributeValue string, typeAttributeValue string) { mb.metricSystemPagingOperations.recordDataPoint(mb.startTime, ts, val, directionAttributeValue, typeAttributeValue) } // RecordSystemPagingUsageDataPoint adds a data point to system.paging.usage metric. -func (mb *MetricsBuilder) RecordSystemPagingUsageDataPoint(ts pdata.Timestamp, val int64, deviceAttributeValue string, stateAttributeValue string) { +func (mb *MetricsBuilder) RecordSystemPagingUsageDataPoint(ts pcommon.Timestamp, val int64, deviceAttributeValue string, stateAttributeValue string) { mb.metricSystemPagingUsage.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue, stateAttributeValue) } // RecordSystemPagingUtilizationDataPoint adds a data point to system.paging.utilization metric. -func (mb *MetricsBuilder) RecordSystemPagingUtilizationDataPoint(ts pdata.Timestamp, val float64, deviceAttributeValue string, stateAttributeValue string) { +func (mb *MetricsBuilder) RecordSystemPagingUtilizationDataPoint(ts pcommon.Timestamp, val float64, deviceAttributeValue string, stateAttributeValue string) { mb.metricSystemPagingUtilization.recordDataPoint(mb.startTime, ts, val, deviceAttributeValue, stateAttributeValue) } // Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted, // and metrics builder should update its startTime and reset it's internal state accordingly. func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { - mb.startTime = pdata.NewTimestampFromTime(time.Now()) + mb.startTime = pcommon.NewTimestampFromTime(time.Now()) for _, op := range options { op(mb) } diff --git a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_others.go b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_others.go index c32239d87fa9..105ca56d24c5 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_others.go +++ b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_others.go @@ -25,7 +25,8 @@ import ( "github.com/shirou/gopsutil/v3/host" "github.com/shirou/gopsutil/v3/mem" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/receiver/scrapererror" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/internal/metadata" @@ -58,11 +59,11 @@ func (s *scraper) start(context.Context, component.Host) error { return err } - s.mb = metadata.NewMetricsBuilder(s.config.Metrics, metadata.WithStartTime(pdata.Timestamp(bootTime*1e9))) + s.mb = metadata.NewMetricsBuilder(s.config.Metrics, metadata.WithStartTime(pcommon.Timestamp(bootTime*1e9))) return nil } -func (s *scraper) scrape(_ context.Context) (pdata.Metrics, error) { +func (s *scraper) scrape(_ context.Context) (pmetric.Metrics, error) { var errors scrapererror.ScrapeErrors err := s.scrapePagingUsageMetric() @@ -79,7 +80,7 @@ func (s *scraper) scrape(_ context.Context) (pdata.Metrics, error) { } func (s *scraper) scrapePagingUsageMetric() error { - now := pdata.NewTimestampFromTime(time.Now()) + now := 
pcommon.NewTimestampFromTime(time.Now()) pageFileStats, err := s.getPageFileStats() if err != nil { return fmt.Errorf("failed to read page file stats: %w", err) @@ -90,7 +91,7 @@ func (s *scraper) scrapePagingUsageMetric() error { return nil } -func (s *scraper) recordPagingUsageDataPoints(now pdata.Timestamp, pageFileStats []*pageFileStats) { +func (s *scraper) recordPagingUsageDataPoints(now pcommon.Timestamp, pageFileStats []*pageFileStats) { for _, pageFile := range pageFileStats { s.mb.RecordSystemPagingUsageDataPoint(now, int64(pageFile.usedBytes), pageFile.deviceName, metadata.AttributeState.Used) s.mb.RecordSystemPagingUsageDataPoint(now, int64(pageFile.freeBytes), pageFile.deviceName, metadata.AttributeState.Free) @@ -100,7 +101,7 @@ func (s *scraper) recordPagingUsageDataPoints(now pdata.Timestamp, pageFileStats } } -func (s *scraper) recordPagingUtilizationDataPoints(now pdata.Timestamp, pageFileStats []*pageFileStats) { +func (s *scraper) recordPagingUtilizationDataPoints(now pcommon.Timestamp, pageFileStats []*pageFileStats) { for _, pageFile := range pageFileStats { s.mb.RecordSystemPagingUtilizationDataPoint(now, float64(pageFile.usedBytes)/float64(pageFile.totalBytes), pageFile.deviceName, metadata.AttributeState.Used) s.mb.RecordSystemPagingUtilizationDataPoint(now, float64(pageFile.freeBytes)/float64(pageFile.totalBytes), pageFile.deviceName, metadata.AttributeState.Free) @@ -111,7 +112,7 @@ func (s *scraper) recordPagingUtilizationDataPoints(now pdata.Timestamp, pageFil } func (s *scraper) scrapePagingMetrics() error { - now := pdata.NewTimestampFromTime(time.Now()) + now := pcommon.NewTimestampFromTime(time.Now()) swap, err := s.swapMemory() if err != nil { return fmt.Errorf("failed to read swap info: %w", err) @@ -122,14 +123,14 @@ func (s *scraper) scrapePagingMetrics() error { return nil } -func (s *scraper) recordPagingOperationsDataPoints(now pdata.Timestamp, swap *mem.SwapMemoryStat) { +func (s *scraper) recordPagingOperationsDataPoints(now pcommon.Timestamp, swap *mem.SwapMemoryStat) { s.mb.RecordSystemPagingOperationsDataPoint(now, int64(swap.Sin), metadata.AttributeDirection.PageIn, metadata.AttributeType.Major) s.mb.RecordSystemPagingOperationsDataPoint(now, int64(swap.Sout), metadata.AttributeDirection.PageOut, metadata.AttributeType.Major) s.mb.RecordSystemPagingOperationsDataPoint(now, int64(swap.PgIn), metadata.AttributeDirection.PageIn, metadata.AttributeType.Minor) s.mb.RecordSystemPagingOperationsDataPoint(now, int64(swap.PgOut), metadata.AttributeDirection.PageOut, metadata.AttributeType.Minor) } -func (s *scraper) recordPageFaultsDataPoints(now pdata.Timestamp, swap *mem.SwapMemoryStat) { +func (s *scraper) recordPageFaultsDataPoints(now pcommon.Timestamp, swap *mem.SwapMemoryStat) { s.mb.RecordSystemPagingFaultsDataPoint(now, int64(swap.PgMajFault), metadata.AttributeType.Major) s.mb.RecordSystemPagingFaultsDataPoint(now, int64(swap.PgFault-swap.PgMajFault), metadata.AttributeType.Minor) } diff --git a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_test.go b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_test.go index 7df457c42b3d..68e078d1df27 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_test.go +++ b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_test.go @@ -23,7 +23,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" 
"go.opentelemetry.io/collector/component/componenttest" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/internal/metadata" @@ -34,7 +35,7 @@ func TestScrape(t *testing.T) { name string config Config bootTimeFunc func() (uint64, error) - expectedStartTime pdata.Timestamp + expectedStartTime pcommon.Timestamp initializationErr string } @@ -103,12 +104,12 @@ func TestScrape(t *testing.T) { } } -func assertPagingUsageMetricValid(t *testing.T, hostPagingUsageMetric pdata.Metric) { - expected := pdata.NewMetric() +func assertPagingUsageMetricValid(t *testing.T, hostPagingUsageMetric pmetric.Metric) { + expected := pmetric.NewMetric() expected.SetName("system.paging.usage") expected.SetDescription("Swap (unix) or pagefile (windows) usage.") expected.SetUnit("By") - expected.SetDataType(pdata.MetricDataTypeSum) + expected.SetDataType(pmetric.MetricDataTypeSum) internal.AssertDescriptorEqual(t, expected, hostPagingUsageMetric) // it's valid for a system to have no swap space / paging file, so if no data points were returned, do no validation @@ -124,11 +125,11 @@ func assertPagingUsageMetricValid(t *testing.T, hostPagingUsageMetric pdata.Metr } assert.GreaterOrEqual(t, hostPagingUsageMetric.Sum().DataPoints().Len(), expectedDataPoints) - internal.AssertSumMetricHasAttributeValue(t, hostPagingUsageMetric, 0, "state", pdata.NewValueString(metadata.AttributeState.Used)) - internal.AssertSumMetricHasAttributeValue(t, hostPagingUsageMetric, 1, "state", pdata.NewValueString(metadata.AttributeState.Free)) + internal.AssertSumMetricHasAttributeValue(t, hostPagingUsageMetric, 0, "state", pcommon.NewValueString(metadata.AttributeState.Used)) + internal.AssertSumMetricHasAttributeValue(t, hostPagingUsageMetric, 1, "state", pcommon.NewValueString(metadata.AttributeState.Free)) // Windows and Linux do not support cached state label if runtime.GOOS != "windows" && runtime.GOOS != "linux" { - internal.AssertSumMetricHasAttributeValue(t, hostPagingUsageMetric, 2, "state", pdata.NewValueString(metadata.AttributeState.Cached)) + internal.AssertSumMetricHasAttributeValue(t, hostPagingUsageMetric, 2, "state", pcommon.NewValueString(metadata.AttributeState.Cached)) } // on Windows and Linux, also expect the page file device name label @@ -138,12 +139,12 @@ func assertPagingUsageMetricValid(t *testing.T, hostPagingUsageMetric pdata.Metr } } -func assertPagingUtilizationMetricValid(t *testing.T, hostPagingUtilizationMetric pdata.Metric) { - expected := pdata.NewMetric() +func assertPagingUtilizationMetricValid(t *testing.T, hostPagingUtilizationMetric pmetric.Metric) { + expected := pmetric.NewMetric() expected.SetName("system.paging.utilization") expected.SetDescription("Swap (unix) or pagefile (windows) utilization.") expected.SetUnit("1") - expected.SetDataType(pdata.MetricDataTypeGauge) + expected.SetDataType(pmetric.MetricDataTypeGauge) internal.AssertDescriptorEqual(t, expected, hostPagingUtilizationMetric) // it's valid for a system to have no swap space / paging file, so if no data points were returned, do no validation @@ -159,11 +160,11 @@ func assertPagingUtilizationMetricValid(t *testing.T, hostPagingUtilizationMetri } assert.GreaterOrEqual(t, 
hostPagingUtilizationMetric.Gauge().DataPoints().Len(), expectedDataPoints) - internal.AssertGaugeMetricHasAttributeValue(t, hostPagingUtilizationMetric, 0, "state", pdata.NewValueString(metadata.AttributeState.Used)) - internal.AssertGaugeMetricHasAttributeValue(t, hostPagingUtilizationMetric, 1, "state", pdata.NewValueString(metadata.AttributeState.Free)) + internal.AssertGaugeMetricHasAttributeValue(t, hostPagingUtilizationMetric, 0, "state", pcommon.NewValueString(metadata.AttributeState.Used)) + internal.AssertGaugeMetricHasAttributeValue(t, hostPagingUtilizationMetric, 1, "state", pcommon.NewValueString(metadata.AttributeState.Free)) // Windows and Linux do not support cached state label if runtime.GOOS != "windows" && runtime.GOOS != "linux" { - internal.AssertGaugeMetricHasAttributeValue(t, hostPagingUtilizationMetric, 2, "state", pdata.NewValueString(metadata.AttributeState.Cached)) + internal.AssertGaugeMetricHasAttributeValue(t, hostPagingUtilizationMetric, 2, "state", pcommon.NewValueString(metadata.AttributeState.Cached)) } // on Windows and Linux, also expect the page file device name label @@ -173,12 +174,12 @@ func assertPagingUtilizationMetricValid(t *testing.T, hostPagingUtilizationMetri } } -func assertPagingOperationsMetricValid(t *testing.T, pagingMetric pdata.Metric, startTime pdata.Timestamp) { - expected := pdata.NewMetric() +func assertPagingOperationsMetricValid(t *testing.T, pagingMetric pmetric.Metric, startTime pcommon.Timestamp) { + expected := pmetric.NewMetric() expected.SetName("system.paging.operations") expected.SetDescription("The number of paging operations.") expected.SetUnit("{operations}") - expected.SetDataType(pdata.MetricDataTypeSum) + expected.SetDataType(pmetric.MetricDataTypeSum) internal.AssertDescriptorEqual(t, expected, pagingMetric) if startTime != 0 { @@ -192,24 +193,24 @@ func assertPagingOperationsMetricValid(t *testing.T, pagingMetric pdata.Metric, } assert.Equal(t, expectedDataPoints, pagingMetric.Sum().DataPoints().Len()) - internal.AssertSumMetricHasAttributeValue(t, pagingMetric, 0, "type", pdata.NewValueString(metadata.AttributeType.Major)) - internal.AssertSumMetricHasAttributeValue(t, pagingMetric, 0, "direction", pdata.NewValueString(metadata.AttributeDirection.PageIn)) - internal.AssertSumMetricHasAttributeValue(t, pagingMetric, 1, "type", pdata.NewValueString(metadata.AttributeType.Major)) - internal.AssertSumMetricHasAttributeValue(t, pagingMetric, 1, "direction", pdata.NewValueString(metadata.AttributeDirection.PageOut)) + internal.AssertSumMetricHasAttributeValue(t, pagingMetric, 0, "type", pcommon.NewValueString(metadata.AttributeType.Major)) + internal.AssertSumMetricHasAttributeValue(t, pagingMetric, 0, "direction", pcommon.NewValueString(metadata.AttributeDirection.PageIn)) + internal.AssertSumMetricHasAttributeValue(t, pagingMetric, 1, "type", pcommon.NewValueString(metadata.AttributeType.Major)) + internal.AssertSumMetricHasAttributeValue(t, pagingMetric, 1, "direction", pcommon.NewValueString(metadata.AttributeDirection.PageOut)) if runtime.GOOS != "windows" { - internal.AssertSumMetricHasAttributeValue(t, pagingMetric, 2, "type", pdata.NewValueString(metadata.AttributeType.Minor)) - internal.AssertSumMetricHasAttributeValue(t, pagingMetric, 2, "direction", pdata.NewValueString(metadata.AttributeDirection.PageIn)) - internal.AssertSumMetricHasAttributeValue(t, pagingMetric, 3, "type", pdata.NewValueString(metadata.AttributeType.Minor)) - internal.AssertSumMetricHasAttributeValue(t, pagingMetric, 3, "direction", 
pdata.NewValueString(metadata.AttributeDirection.PageOut)) + internal.AssertSumMetricHasAttributeValue(t, pagingMetric, 2, "type", pcommon.NewValueString(metadata.AttributeType.Minor)) + internal.AssertSumMetricHasAttributeValue(t, pagingMetric, 2, "direction", pcommon.NewValueString(metadata.AttributeDirection.PageIn)) + internal.AssertSumMetricHasAttributeValue(t, pagingMetric, 3, "type", pcommon.NewValueString(metadata.AttributeType.Minor)) + internal.AssertSumMetricHasAttributeValue(t, pagingMetric, 3, "direction", pcommon.NewValueString(metadata.AttributeDirection.PageOut)) } } -func assertPageFaultsMetricValid(t *testing.T, pageFaultsMetric pdata.Metric, startTime pdata.Timestamp) { - expected := pdata.NewMetric() +func assertPageFaultsMetricValid(t *testing.T, pageFaultsMetric pmetric.Metric, startTime pcommon.Timestamp) { + expected := pmetric.NewMetric() expected.SetName("system.paging.faults") expected.SetDescription("The number of page faults.") expected.SetUnit("{faults}") - expected.SetDataType(pdata.MetricDataTypeSum) + expected.SetDataType(pmetric.MetricDataTypeSum) internal.AssertDescriptorEqual(t, expected, pageFaultsMetric) if startTime != 0 { @@ -217,6 +218,6 @@ func assertPageFaultsMetricValid(t *testing.T, pageFaultsMetric pdata.Metric, st } assert.Equal(t, 2, pageFaultsMetric.Sum().DataPoints().Len()) - internal.AssertSumMetricHasAttributeValue(t, pageFaultsMetric, 0, "type", pdata.NewValueString(metadata.AttributeType.Major)) - internal.AssertSumMetricHasAttributeValue(t, pageFaultsMetric, 1, "type", pdata.NewValueString(metadata.AttributeType.Minor)) + internal.AssertSumMetricHasAttributeValue(t, pageFaultsMetric, 0, "type", pcommon.NewValueString(metadata.AttributeType.Major)) + internal.AssertSumMetricHasAttributeValue(t, pageFaultsMetric, 1, "type", pcommon.NewValueString(metadata.AttributeType.Minor)) } diff --git a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_windows.go b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_windows.go index 01ff74784e08..f36010a250aa 100644 --- a/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_windows.go +++ b/receiver/hostmetricsreceiver/internal/scraper/pagingscraper/paging_scraper_windows.go @@ -24,7 +24,8 @@ import ( "github.com/shirou/gopsutil/v3/host" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/receiver/scrapererror" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/perfcounters" @@ -64,12 +65,12 @@ func (s *scraper) start(context.Context, component.Host) error { return err } - s.mb = metadata.NewMetricsBuilder(s.config.Metrics, metadata.WithStartTime(pdata.Timestamp(bootTime*1e9))) + s.mb = metadata.NewMetricsBuilder(s.config.Metrics, metadata.WithStartTime(pcommon.Timestamp(bootTime*1e9))) return s.perfCounterScraper.Initialize(memory) } -func (s *scraper) scrape(context.Context) (pdata.Metrics, error) { +func (s *scraper) scrape(context.Context) (pmetric.Metrics, error) { var errors scrapererror.ScrapeErrors err := s.scrapePagingUsageMetric() @@ -86,7 +87,7 @@ func (s *scraper) scrape(context.Context) (pdata.Metrics, error) { } func (s *scraper) scrapePagingUsageMetric() error { - now := pdata.NewTimestampFromTime(time.Now()) + now := pcommon.NewTimestampFromTime(time.Now()) pageFiles, err := s.pageFileStats() 
     if err != nil {
         return fmt.Errorf("failed to read page file stats: %w", err)
@@ -98,14 +99,14 @@ func (s *scraper) scrapePagingUsageMetric() error {
     return nil
 }
-func (s *scraper) recordPagingUsageDataPoints(now pdata.Timestamp, pageFiles []*pageFileStats) {
+func (s *scraper) recordPagingUsageDataPoints(now pcommon.Timestamp, pageFiles []*pageFileStats) {
     for _, pageFile := range pageFiles {
         s.mb.RecordSystemPagingUsageDataPoint(now, int64(pageFile.usedBytes), pageFile.deviceName, metadata.AttributeState.Used)
         s.mb.RecordSystemPagingUsageDataPoint(now, int64(pageFile.freeBytes), pageFile.deviceName, metadata.AttributeState.Free)
     }
 }
-func (s *scraper) recordPagingUtilizationDataPoints(now pdata.Timestamp, pageFiles []*pageFileStats) {
+func (s *scraper) recordPagingUtilizationDataPoints(now pcommon.Timestamp, pageFiles []*pageFileStats) {
     for _, pageFile := range pageFiles {
         s.mb.RecordSystemPagingUtilizationDataPoint(now, float64(pageFile.usedBytes)/float64(pageFile.totalBytes), pageFile.deviceName, metadata.AttributeState.Used)
         s.mb.RecordSystemPagingUtilizationDataPoint(now, float64(pageFile.freeBytes)/float64(pageFile.totalBytes), pageFile.deviceName, metadata.AttributeState.Free)
@@ -113,7 +114,7 @@ func (s *scraper) recordPagingUtilizationDataPoints(now pdata.Timestamp, pageFil
 }
 func (s *scraper) scrapePagingOperationsMetric() error {
-    now := pdata.NewTimestampFromTime(time.Now())
+    now := pcommon.NewTimestampFromTime(time.Now())
     counters, err := s.perfCounterScraper.Scrape()
     if err != nil {
@@ -136,7 +137,7 @@ func (s *scraper) scrapePagingOperationsMetric() error {
     return nil
 }
-func (s *scraper) recordPagingOperationsDataPoints(now pdata.Timestamp, memoryCounterValues *perfcounters.CounterValues) {
+func (s *scraper) recordPagingOperationsDataPoints(now pcommon.Timestamp, memoryCounterValues *perfcounters.CounterValues) {
     s.mb.RecordSystemPagingOperationsDataPoint(now, memoryCounterValues.Values[pageReadsPerSec], metadata.AttributeDirection.PageIn, metadata.AttributeType.Major)
     s.mb.RecordSystemPagingOperationsDataPoint(now, memoryCounterValues.Values[pageWritesPerSec], metadata.AttributeDirection.PageOut, metadata.AttributeType.Major)
 }
diff --git a/receiver/hostmetricsreceiver/internal/scraper/processesscraper/internal/metadata/generated_metrics_v2.go b/receiver/hostmetricsreceiver/internal/scraper/processesscraper/internal/metadata/generated_metrics_v2.go
index 22199624fdfb..36eef1d1d813 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/processesscraper/internal/metadata/generated_metrics_v2.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/processesscraper/internal/metadata/generated_metrics_v2.go
@@ -5,8 +5,9 @@ package metadata
 import (
     "time"
-    "go.opentelemetry.io/collector/model/pdata"
     conventions "go.opentelemetry.io/collector/model/semconv/v1.9.0"
+    "go.opentelemetry.io/collector/pdata/pcommon"
+    "go.opentelemetry.io/collector/pdata/pmetric"
 )
 // MetricSettings provides common settings for a particular metric.
@@ -32,7 +33,7 @@ func DefaultMetricsSettings() MetricsSettings {
 }
 type metricSystemProcessesCount struct {
-    data pdata.Metric // data buffer for generated metric.
+    data pmetric.Metric // data buffer for generated metric.
     settings MetricSettings // metric settings provided by user.
     capacity int // max observed number of data points added to the metric.
 }
@@ -42,13 +43,13 @@ func (m *metricSystemProcessesCount) init() {
     m.data.SetName("system.processes.count")
     m.data.SetDescription("Total number of processes in each state.")
     m.data.SetUnit("{processes}")
-    m.data.SetDataType(pdata.MetricDataTypeSum)
+    m.data.SetDataType(pmetric.MetricDataTypeSum)
     m.data.Sum().SetIsMonotonic(false)
-    m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative)
+    m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
     m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
 }
-func (m *metricSystemProcessesCount) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, statusAttributeValue string) {
+func (m *metricSystemProcessesCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, statusAttributeValue string) {
     if !m.settings.Enabled {
         return
     }
@@ -56,7 +57,7 @@ func (m *metricSystemProcessesCount) recordDataPoint(start pdata.Timestamp, ts p
     dp.SetStartTimestamp(start)
     dp.SetTimestamp(ts)
     dp.SetIntVal(val)
-    dp.Attributes().Insert(A.Status, pdata.NewValueString(statusAttributeValue))
+    dp.Attributes().Insert(A.Status, pcommon.NewValueString(statusAttributeValue))
 }
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -67,7 +68,7 @@ func (m *metricSystemProcessesCount) updateCapacity() {
 }
 // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
-func (m *metricSystemProcessesCount) emit(metrics pdata.MetricSlice) {
+func (m *metricSystemProcessesCount) emit(metrics pmetric.MetricSlice) {
     if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
         m.updateCapacity()
         m.data.MoveTo(metrics.AppendEmpty())
@@ -78,14 +79,14 @@ func (m *metricSystemProcessesCount) emit(metrics pdata.MetricSlice) {
 func newMetricSystemProcessesCount(settings MetricSettings) metricSystemProcessesCount {
     m := metricSystemProcessesCount{settings: settings}
     if settings.Enabled {
-        m.data = pdata.NewMetric()
+        m.data = pmetric.NewMetric()
         m.init()
     }
     return m
 }
 type metricSystemProcessesCreated struct {
-    data pdata.Metric // data buffer for generated metric.
+    data pmetric.Metric // data buffer for generated metric.
     settings MetricSettings // metric settings provided by user.
     capacity int // max observed number of data points added to the metric.
 }
@@ -95,12 +96,12 @@ func (m *metricSystemProcessesCreated) init() {
     m.data.SetName("system.processes.created")
     m.data.SetDescription("Total number of created processes.")
     m.data.SetUnit("{processes}")
-    m.data.SetDataType(pdata.MetricDataTypeSum)
+    m.data.SetDataType(pmetric.MetricDataTypeSum)
     m.data.Sum().SetIsMonotonic(true)
-    m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative)
+    m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
 }
-func (m *metricSystemProcessesCreated) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) {
+func (m *metricSystemProcessesCreated) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
     if !m.settings.Enabled {
         return
     }
@@ -118,7 +119,7 @@ func (m *metricSystemProcessesCreated) updateCapacity() {
 }
 // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
-func (m *metricSystemProcessesCreated) emit(metrics pdata.MetricSlice) {
+func (m *metricSystemProcessesCreated) emit(metrics pmetric.MetricSlice) {
     if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
         m.updateCapacity()
         m.data.MoveTo(metrics.AppendEmpty())
@@ -129,7 +130,7 @@ func (m *metricSystemProcessesCreated) emit(metrics pdata.MetricSlice) {
 func newMetricSystemProcessesCreated(settings MetricSettings) metricSystemProcessesCreated {
     m := metricSystemProcessesCreated{settings: settings}
     if settings.Enabled {
-        m.data = pdata.NewMetric()
+        m.data = pmetric.NewMetric()
         m.init()
     }
     return m
@@ -138,10 +139,10 @@ func newMetricSystemProcessesCreated(settings MetricSettings) metricSystemProces
 // MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations
 // required to produce metric representation defined in metadata and user settings.
 type MetricsBuilder struct {
-    startTime pdata.Timestamp // start time that will be applied to all recorded data points.
-    metricsCapacity int // maximum observed number of metrics per resource.
-    resourceCapacity int // maximum observed number of resource attributes.
-    metricsBuffer pdata.Metrics // accumulates metrics data before emitting.
+    startTime pcommon.Timestamp // start time that will be applied to all recorded data points.
+    metricsCapacity int // maximum observed number of metrics per resource.
+    resourceCapacity int // maximum observed number of resource attributes.
+    metricsBuffer pmetric.Metrics // accumulates metrics data before emitting.
     metricSystemProcessesCount metricSystemProcessesCount
     metricSystemProcessesCreated metricSystemProcessesCreated
 }
@@ -150,7 +151,7 @@ type MetricsBuilder struct {
 type metricBuilderOption func(*MetricsBuilder)
 // WithStartTime sets startTime on the metrics builder.
-func WithStartTime(startTime pdata.Timestamp) metricBuilderOption {
+func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption {
     return func(mb *MetricsBuilder) {
         mb.startTime = startTime
     }
@@ -158,8 +159,8 @@ func WithStartTime(startTime pdata.Timestamp) metricBuilderOption {
 func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) *MetricsBuilder {
     mb := &MetricsBuilder{
-        startTime: pdata.NewTimestampFromTime(time.Now()),
-        metricsBuffer: pdata.NewMetrics(),
+        startTime: pcommon.NewTimestampFromTime(time.Now()),
+        metricsBuffer: pmetric.NewMetrics(),
         metricSystemProcessesCount: newMetricSystemProcessesCount(settings.SystemProcessesCount),
         metricSystemProcessesCreated: newMetricSystemProcessesCreated(settings.SystemProcessesCreated),
     }
@@ -170,7 +171,7 @@ func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption)
 }
 // updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity.
-func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) {
+func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) {
     if mb.metricsCapacity < rm.ScopeMetrics().At(0).Metrics().Len() {
         mb.metricsCapacity = rm.ScopeMetrics().At(0).Metrics().Len()
     }
@@ -180,14 +181,14 @@ func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) {
 }
 // ResourceOption applies changes to provided resource.
-type ResourceOption func(pdata.Resource)
+type ResourceOption func(pcommon.Resource)
 // EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for
 // recording another set of data points as part of another resource. This function can be helpful when one scraper
 // needs to emit metrics from several resources. Otherwise calling this function is not required,
 // just `Emit` function can be called instead. Resource attributes should be provided as ResourceOption arguments.
 func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) {
-    rm := pdata.NewResourceMetrics()
+    rm := pmetric.NewResourceMetrics()
     rm.SetSchemaUrl(conventions.SchemaURL)
     rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity)
     for _, op := range ro {
@@ -207,27 +208,27 @@ func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) {
 // Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for
 // recording another set of metrics. This function will be responsible for applying all the transformations required to
 // produce metric representation defined in metadata and user settings, e.g. delta or cumulative.
-func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pdata.Metrics {
+func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pmetric.Metrics {
     mb.EmitForResource(ro...)
-    metrics := pdata.NewMetrics()
+    metrics := pmetric.NewMetrics()
     mb.metricsBuffer.MoveTo(metrics)
     return metrics
 }
 // RecordSystemProcessesCountDataPoint adds a data point to system.processes.count metric.
-func (mb *MetricsBuilder) RecordSystemProcessesCountDataPoint(ts pdata.Timestamp, val int64, statusAttributeValue string) {
+func (mb *MetricsBuilder) RecordSystemProcessesCountDataPoint(ts pcommon.Timestamp, val int64, statusAttributeValue string) {
     mb.metricSystemProcessesCount.recordDataPoint(mb.startTime, ts, val, statusAttributeValue)
 }
 // RecordSystemProcessesCreatedDataPoint adds a data point to system.processes.created metric.
-func (mb *MetricsBuilder) RecordSystemProcessesCreatedDataPoint(ts pdata.Timestamp, val int64) {
+func (mb *MetricsBuilder) RecordSystemProcessesCreatedDataPoint(ts pcommon.Timestamp, val int64) {
     mb.metricSystemProcessesCreated.recordDataPoint(mb.startTime, ts, val)
 }
 // Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted,
 // and metrics builder should update its startTime and reset it's internal state accordingly.
 func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) {
-    mb.startTime = pdata.NewTimestampFromTime(time.Now())
+    mb.startTime = pcommon.NewTimestampFromTime(time.Now())
     for _, op := range options {
         op(mb)
     }
diff --git a/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper.go b/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper.go
index dc3d562f6100..4ae87ad7b060 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper.go
@@ -22,7 +22,8 @@ import (
     "github.com/shirou/gopsutil/v3/load"
     "github.com/shirou/gopsutil/v3/process"
     "go.opentelemetry.io/collector/component"
-    "go.opentelemetry.io/collector/model/pdata"
+    "go.opentelemetry.io/collector/pdata/pcommon"
+    "go.opentelemetry.io/collector/pdata/pmetric"
     "go.opentelemetry.io/collector/receiver/scrapererror"
     "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/processesscraper/internal/metadata"
@@ -81,20 +82,20 @@ func (s *scraper) start(context.Context, component.Host) error {
         return err
     }
-    s.mb = metadata.NewMetricsBuilder(s.config.Metrics, metadata.WithStartTime(pdata.Timestamp(bootTime*1e9)))
+    s.mb = metadata.NewMetricsBuilder(s.config.Metrics, metadata.WithStartTime(pcommon.Timestamp(bootTime*1e9)))
     return nil
 }
-func (s *scraper) scrape(_ context.Context) (pdata.Metrics, error) {
-    now := pdata.NewTimestampFromTime(time.Now())
+func (s *scraper) scrape(_ context.Context) (pmetric.Metrics, error) {
+    now := pcommon.NewTimestampFromTime(time.Now())
-    md := pdata.NewMetrics()
+    md := pmetric.NewMetrics()
     metrics := md.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics()
     metrics.EnsureCapacity(metricsLength)
     processMetadata, err := s.getProcessesMetadata()
     if err != nil {
-        return pdata.NewMetrics(), scrapererror.NewPartialScrapeError(err, metricsLength)
+        return pmetric.NewMetrics(), scrapererror.NewPartialScrapeError(err, metricsLength)
     }
     if enableProcessesCount && processMetadata.countByStatus != nil {
diff --git a/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_test.go b/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_test.go
index 2a4336603cae..58835e9b8af4 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_test.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/processesscraper/processes_scraper_test.go
@@ -25,7 +25,8 @@ import (
     "github.com/shirou/gopsutil/v3/process"
     "github.com/stretchr/testify/assert"
     "go.opentelemetry.io/collector/component/componenttest"
-    "go.opentelemetry.io/collector/model/pdata"
+    "go.opentelemetry.io/collector/pdata/pcommon"
+    "go.opentelemetry.io/collector/pdata/pmetric"
     "go.opentelemetry.io/collector/receiver/scrapererror"
     "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal"
@@ -43,7 +44,7 @@ func TestScrape(t *testing.T) {
     getMiscStats func() (*load.MiscStat, error)
     getProcesses func() ([]proc, error)
     expectedErr string
-    validate func(*testing.T, pdata.MetricSlice)
+    validate func(*testing.T, pmetric.MetricSlice)
 }
     testCases := []testCase{{
@@ -125,7 +126,7 @@ func TestScrape(t *testing.T) {
     }
 }
-func validateRealData(t *testing.T, metrics pdata.MetricSlice) {
+func validateRealData(t *testing.T, metrics pmetric.MetricSlice) {
     assert := assert.New(t)
     metricIndex := 0
@@ -158,11 +159,11 @@ func validateRealData(t *testing.T, metrics pdata.MetricSlice) {
     }
 }
-func validateStartTime(t *testing.T, metrics pdata.MetricSlice) {
+func validateStartTime(t *testing.T, metrics pmetric.MetricSlice) {
     startTime, err := host.BootTime()
     assert.NoError(t, err)
     for i := 0; i < metricsLength; i++ {
-        internal.AssertSumMetricStartTimeEquals(t, metrics.At(i), pdata.Timestamp(startTime*1e9))
+        internal.AssertSumMetricStartTimeEquals(t, metrics.At(i), pcommon.Timestamp(startTime*1e9))
     }
 }
@@ -194,7 +195,7 @@ func (f fakeProcess) Status() ([]string, error) {
     return []string{string(f)}, nil
 }
-func validateFakeData(t *testing.T, metrics pdata.MetricSlice) {
+func validateFakeData(t *testing.T, metrics pmetric.MetricSlice) {
     assert := assert.New(t)
     metricIndex := 0
     if expectProcessesCountMetric {
diff --git a/receiver/hostmetricsreceiver/internal/scraper/processscraper/internal/metadata/generated_metrics_v2.go b/receiver/hostmetricsreceiver/internal/scraper/processscraper/internal/metadata/generated_metrics_v2.go
index c48ffce8b4e0..6ef6060dc445 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/processscraper/internal/metadata/generated_metrics_v2.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/processscraper/internal/metadata/generated_metrics_v2.go
@@ -5,8 +5,9 @@ package metadata
 import (
     "time"
-    "go.opentelemetry.io/collector/model/pdata"
     conventions "go.opentelemetry.io/collector/model/semconv/v1.9.0"
+    "go.opentelemetry.io/collector/pdata/pcommon"
+    "go.opentelemetry.io/collector/pdata/pmetric"
 )
 // MetricSettings provides common settings for a particular metric.
@@ -40,7 +41,7 @@ func DefaultMetricsSettings() MetricsSettings {
 }
 type metricProcessCPUTime struct {
-    data pdata.Metric // data buffer for generated metric.
+    data pmetric.Metric // data buffer for generated metric.
     settings MetricSettings // metric settings provided by user.
     capacity int // max observed number of data points added to the metric.
 }
@@ -50,13 +51,13 @@ func (m *metricProcessCPUTime) init() {
     m.data.SetName("process.cpu.time")
     m.data.SetDescription("Total CPU seconds broken down by different states.")
     m.data.SetUnit("s")
-    m.data.SetDataType(pdata.MetricDataTypeSum)
+    m.data.SetDataType(pmetric.MetricDataTypeSum)
     m.data.Sum().SetIsMonotonic(true)
-    m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative)
+    m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
     m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
 }
-func (m *metricProcessCPUTime) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, stateAttributeValue string) {
+func (m *metricProcessCPUTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, stateAttributeValue string) {
     if !m.settings.Enabled {
         return
     }
@@ -64,7 +65,7 @@ func (m *metricProcessCPUTime) recordDataPoint(start pdata.Timestamp, ts pdata.T
     dp.SetStartTimestamp(start)
     dp.SetTimestamp(ts)
     dp.SetDoubleVal(val)
-    dp.Attributes().Insert(A.State, pdata.NewValueString(stateAttributeValue))
+    dp.Attributes().Insert(A.State, pcommon.NewValueString(stateAttributeValue))
 }
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -75,7 +76,7 @@ func (m *metricProcessCPUTime) updateCapacity() {
 }
 // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
-func (m *metricProcessCPUTime) emit(metrics pdata.MetricSlice) {
+func (m *metricProcessCPUTime) emit(metrics pmetric.MetricSlice) {
     if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
         m.updateCapacity()
         m.data.MoveTo(metrics.AppendEmpty())
@@ -86,14 +87,14 @@ func (m *metricProcessCPUTime) emit(metrics pdata.MetricSlice) {
 func newMetricProcessCPUTime(settings MetricSettings) metricProcessCPUTime {
     m := metricProcessCPUTime{settings: settings}
     if settings.Enabled {
-        m.data = pdata.NewMetric()
+        m.data = pmetric.NewMetric()
         m.init()
     }
     return m
 }
 type metricProcessDiskIo struct {
-    data pdata.Metric // data buffer for generated metric.
+    data pmetric.Metric // data buffer for generated metric.
     settings MetricSettings // metric settings provided by user.
     capacity int // max observed number of data points added to the metric.
 }
@@ -103,13 +104,13 @@ func (m *metricProcessDiskIo) init() {
     m.data.SetName("process.disk.io")
     m.data.SetDescription("Disk bytes transferred.")
     m.data.SetUnit("By")
-    m.data.SetDataType(pdata.MetricDataTypeSum)
+    m.data.SetDataType(pmetric.MetricDataTypeSum)
     m.data.Sum().SetIsMonotonic(true)
-    m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative)
+    m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
     m.data.Sum().DataPoints().EnsureCapacity(m.capacity)
 }
-func (m *metricProcessDiskIo) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, directionAttributeValue string) {
+func (m *metricProcessDiskIo) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, directionAttributeValue string) {
     if !m.settings.Enabled {
         return
     }
@@ -117,7 +118,7 @@ func (m *metricProcessDiskIo) recordDataPoint(start pdata.Timestamp, ts pdata.Ti
     dp.SetStartTimestamp(start)
     dp.SetTimestamp(ts)
     dp.SetIntVal(val)
-    dp.Attributes().Insert(A.Direction, pdata.NewValueString(directionAttributeValue))
+    dp.Attributes().Insert(A.Direction, pcommon.NewValueString(directionAttributeValue))
 }
 // updateCapacity saves max length of data point slices that will be used for the slice capacity.
@@ -128,7 +129,7 @@ func (m *metricProcessDiskIo) updateCapacity() {
 }
 // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
-func (m *metricProcessDiskIo) emit(metrics pdata.MetricSlice) {
+func (m *metricProcessDiskIo) emit(metrics pmetric.MetricSlice) {
     if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
         m.updateCapacity()
         m.data.MoveTo(metrics.AppendEmpty())
@@ -139,14 +140,14 @@ func (m *metricProcessDiskIo) emit(metrics pdata.MetricSlice) {
 func newMetricProcessDiskIo(settings MetricSettings) metricProcessDiskIo {
     m := metricProcessDiskIo{settings: settings}
     if settings.Enabled {
-        m.data = pdata.NewMetric()
+        m.data = pmetric.NewMetric()
         m.init()
     }
     return m
 }
 type metricProcessMemoryPhysicalUsage struct {
-    data pdata.Metric // data buffer for generated metric.
+    data pmetric.Metric // data buffer for generated metric.
     settings MetricSettings // metric settings provided by user.
     capacity int // max observed number of data points added to the metric.
 }
@@ -156,12 +157,12 @@ func (m *metricProcessMemoryPhysicalUsage) init() {
     m.data.SetName("process.memory.physical_usage")
     m.data.SetDescription("The amount of physical memory in use.")
     m.data.SetUnit("By")
-    m.data.SetDataType(pdata.MetricDataTypeSum)
+    m.data.SetDataType(pmetric.MetricDataTypeSum)
     m.data.Sum().SetIsMonotonic(false)
-    m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative)
+    m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
 }
-func (m *metricProcessMemoryPhysicalUsage) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) {
+func (m *metricProcessMemoryPhysicalUsage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
     if !m.settings.Enabled {
         return
     }
@@ -179,7 +180,7 @@ func (m *metricProcessMemoryPhysicalUsage) updateCapacity() {
 }
 // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
-func (m *metricProcessMemoryPhysicalUsage) emit(metrics pdata.MetricSlice) {
+func (m *metricProcessMemoryPhysicalUsage) emit(metrics pmetric.MetricSlice) {
     if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
         m.updateCapacity()
         m.data.MoveTo(metrics.AppendEmpty())
@@ -190,14 +191,14 @@ func (m *metricProcessMemoryPhysicalUsage) emit(metrics pdata.MetricSlice) {
 func newMetricProcessMemoryPhysicalUsage(settings MetricSettings) metricProcessMemoryPhysicalUsage {
     m := metricProcessMemoryPhysicalUsage{settings: settings}
     if settings.Enabled {
-        m.data = pdata.NewMetric()
+        m.data = pmetric.NewMetric()
         m.init()
     }
     return m
 }
 type metricProcessMemoryVirtualUsage struct {
-    data pdata.Metric // data buffer for generated metric.
+    data pmetric.Metric // data buffer for generated metric.
     settings MetricSettings // metric settings provided by user.
     capacity int // max observed number of data points added to the metric.
 }
@@ -207,12 +208,12 @@ func (m *metricProcessMemoryVirtualUsage) init() {
     m.data.SetName("process.memory.virtual_usage")
     m.data.SetDescription("Virtual memory size.")
     m.data.SetUnit("By")
-    m.data.SetDataType(pdata.MetricDataTypeSum)
+    m.data.SetDataType(pmetric.MetricDataTypeSum)
     m.data.Sum().SetIsMonotonic(false)
-    m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative)
+    m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
 }
-func (m *metricProcessMemoryVirtualUsage) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) {
+func (m *metricProcessMemoryVirtualUsage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) {
     if !m.settings.Enabled {
         return
     }
@@ -230,7 +231,7 @@ func (m *metricProcessMemoryVirtualUsage) updateCapacity() {
 }
 // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points.
-func (m *metricProcessMemoryVirtualUsage) emit(metrics pdata.MetricSlice) {
+func (m *metricProcessMemoryVirtualUsage) emit(metrics pmetric.MetricSlice) {
     if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 {
         m.updateCapacity()
         m.data.MoveTo(metrics.AppendEmpty())
@@ -241,7 +242,7 @@ func (m *metricProcessMemoryVirtualUsage) emit(metrics pdata.MetricSlice) {
 func newMetricProcessMemoryVirtualUsage(settings MetricSettings) metricProcessMemoryVirtualUsage {
     m := metricProcessMemoryVirtualUsage{settings: settings}
     if settings.Enabled {
-        m.data = pdata.NewMetric()
+        m.data = pmetric.NewMetric()
         m.init()
     }
     return m
@@ -250,10 +251,10 @@ func newMetricProcessMemoryVirtualUsage(settings MetricSettings) metricProcessMe
 // MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations
 // required to produce metric representation defined in metadata and user settings.
 type MetricsBuilder struct {
-    startTime pdata.Timestamp // start time that will be applied to all recorded data points.
-    metricsCapacity int // maximum observed number of metrics per resource.
-    resourceCapacity int // maximum observed number of resource attributes.
-    metricsBuffer pdata.Metrics // accumulates metrics data before emitting.
+    startTime pcommon.Timestamp // start time that will be applied to all recorded data points.
+    metricsCapacity int // maximum observed number of metrics per resource.
+    resourceCapacity int // maximum observed number of resource attributes.
+    metricsBuffer pmetric.Metrics // accumulates metrics data before emitting.
     metricProcessCPUTime metricProcessCPUTime
     metricProcessDiskIo metricProcessDiskIo
     metricProcessMemoryPhysicalUsage metricProcessMemoryPhysicalUsage
@@ -264,7 +265,7 @@ type MetricsBuilder struct {
 type metricBuilderOption func(*MetricsBuilder)
 // WithStartTime sets startTime on the metrics builder.
-func WithStartTime(startTime pdata.Timestamp) metricBuilderOption {
+func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption {
     return func(mb *MetricsBuilder) {
         mb.startTime = startTime
     }
@@ -272,8 +273,8 @@ func WithStartTime(startTime pdata.Timestamp) metricBuilderOption {
 func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) *MetricsBuilder {
     mb := &MetricsBuilder{
-        startTime: pdata.NewTimestampFromTime(time.Now()),
-        metricsBuffer: pdata.NewMetrics(),
+        startTime: pcommon.NewTimestampFromTime(time.Now()),
+        metricsBuffer: pmetric.NewMetrics(),
         metricProcessCPUTime: newMetricProcessCPUTime(settings.ProcessCPUTime),
         metricProcessDiskIo: newMetricProcessDiskIo(settings.ProcessDiskIo),
         metricProcessMemoryPhysicalUsage: newMetricProcessMemoryPhysicalUsage(settings.ProcessMemoryPhysicalUsage),
@@ -286,7 +287,7 @@ func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption)
 }
 // updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity.
-func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) {
+func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) {
     if mb.metricsCapacity < rm.ScopeMetrics().At(0).Metrics().Len() {
         mb.metricsCapacity = rm.ScopeMetrics().At(0).Metrics().Len()
     }
@@ -296,46 +297,46 @@ func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) {
 }
 // ResourceOption applies changes to provided resource.
-type ResourceOption func(pdata.Resource)
+type ResourceOption func(pcommon.Resource)
 // WithProcessCommand sets provided value as "process.command" attribute for current resource.
 func WithProcessCommand(val string) ResourceOption {
-    return func(r pdata.Resource) {
+    return func(r pcommon.Resource) {
         r.Attributes().UpsertString("process.command", val)
     }
 }
 // WithProcessCommandLine sets provided value as "process.command_line" attribute for current resource.
 func WithProcessCommandLine(val string) ResourceOption {
-    return func(r pdata.Resource) {
+    return func(r pcommon.Resource) {
         r.Attributes().UpsertString("process.command_line", val)
     }
 }
 // WithProcessExecutableName sets provided value as "process.executable.name" attribute for current resource.
 func WithProcessExecutableName(val string) ResourceOption {
-    return func(r pdata.Resource) {
+    return func(r pcommon.Resource) {
         r.Attributes().UpsertString("process.executable.name", val)
     }
 }
 // WithProcessExecutablePath sets provided value as "process.executable.path" attribute for current resource.
 func WithProcessExecutablePath(val string) ResourceOption {
-    return func(r pdata.Resource) {
+    return func(r pcommon.Resource) {
         r.Attributes().UpsertString("process.executable.path", val)
     }
 }
 // WithProcessOwner sets provided value as "process.owner" attribute for current resource.
 func WithProcessOwner(val string) ResourceOption {
-    return func(r pdata.Resource) {
+    return func(r pcommon.Resource) {
         r.Attributes().UpsertString("process.owner", val)
     }
 }
 // WithProcessPid sets provided value as "process.pid" attribute for current resource.
 func WithProcessPid(val int64) ResourceOption {
-    return func(r pdata.Resource) {
+    return func(r pcommon.Resource) {
         r.Attributes().UpsertInt("process.pid", val)
     }
 }
@@ -345,7 +346,7 @@ func WithProcessPid(val int64) ResourceOption {
 // needs to emit metrics from several resources. Otherwise calling this function is not required,
 // just `Emit` function can be called instead. Resource attributes should be provided as ResourceOption arguments.
 func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) {
-    rm := pdata.NewResourceMetrics()
+    rm := pmetric.NewResourceMetrics()
     rm.SetSchemaUrl(conventions.SchemaURL)
     rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity)
     for _, op := range ro {
@@ -367,37 +368,37 @@ func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) {
 // Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for
 // recording another set of metrics. This function will be responsible for applying all the transformations required to
 // produce metric representation defined in metadata and user settings, e.g. delta or cumulative.
-func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pdata.Metrics {
+func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pmetric.Metrics {
     mb.EmitForResource(ro...)
-    metrics := pdata.NewMetrics()
+    metrics := pmetric.NewMetrics()
     mb.metricsBuffer.MoveTo(metrics)
     return metrics
 }
 // RecordProcessCPUTimeDataPoint adds a data point to process.cpu.time metric.
-func (mb *MetricsBuilder) RecordProcessCPUTimeDataPoint(ts pdata.Timestamp, val float64, stateAttributeValue string) {
+func (mb *MetricsBuilder) RecordProcessCPUTimeDataPoint(ts pcommon.Timestamp, val float64, stateAttributeValue string) {
     mb.metricProcessCPUTime.recordDataPoint(mb.startTime, ts, val, stateAttributeValue)
 }
 // RecordProcessDiskIoDataPoint adds a data point to process.disk.io metric.
-func (mb *MetricsBuilder) RecordProcessDiskIoDataPoint(ts pdata.Timestamp, val int64, directionAttributeValue string) {
+func (mb *MetricsBuilder) RecordProcessDiskIoDataPoint(ts pcommon.Timestamp, val int64, directionAttributeValue string) {
     mb.metricProcessDiskIo.recordDataPoint(mb.startTime, ts, val, directionAttributeValue)
 }
 // RecordProcessMemoryPhysicalUsageDataPoint adds a data point to process.memory.physical_usage metric.
-func (mb *MetricsBuilder) RecordProcessMemoryPhysicalUsageDataPoint(ts pdata.Timestamp, val int64) {
+func (mb *MetricsBuilder) RecordProcessMemoryPhysicalUsageDataPoint(ts pcommon.Timestamp, val int64) {
     mb.metricProcessMemoryPhysicalUsage.recordDataPoint(mb.startTime, ts, val)
 }
 // RecordProcessMemoryVirtualUsageDataPoint adds a data point to process.memory.virtual_usage metric.
-func (mb *MetricsBuilder) RecordProcessMemoryVirtualUsageDataPoint(ts pdata.Timestamp, val int64) {
+func (mb *MetricsBuilder) RecordProcessMemoryVirtualUsageDataPoint(ts pcommon.Timestamp, val int64) {
     mb.metricProcessMemoryVirtualUsage.recordDataPoint(mb.startTime, ts, val)
 }
 // Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted,
 // and metrics builder should update its startTime and reset it's internal state accordingly.
 func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) {
-    mb.startTime = pdata.NewTimestampFromTime(time.Now())
+    mb.startTime = pcommon.NewTimestampFromTime(time.Now())
     for _, op := range options {
         op(mb)
     }
diff --git a/receiver/hostmetricsreceiver/internal/scraper/processscraper/process.go b/receiver/hostmetricsreceiver/internal/scraper/processscraper/process.go
index 7078c787aa08..4c705e8ad5c0 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/processscraper/process.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/processscraper/process.go
@@ -25,7 +25,7 @@ import (
 // processMetadata stores process related metadata along
 // with the process handle, and provides a function to
-// initialize a pdata.Resource with the metadata
+// initialize a pcommon.Resource with the metadata
 type processMetadata struct {
     pid int32
diff --git a/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper.go b/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper.go
index c669d9fdb2e2..8fac209b900d 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper.go
@@ -21,7 +21,8 @@ import (
     "github.com/shirou/gopsutil/v3/host"
     "go.opentelemetry.io/collector/component"
-    "go.opentelemetry.io/collector/model/pdata"
+    "go.opentelemetry.io/collector/pdata/pcommon"
+    "go.opentelemetry.io/collector/pdata/pmetric"
     "go.opentelemetry.io/collector/receiver/scrapererror"
     "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/processor/filterset"
@@ -77,25 +78,25 @@ func (s *scraper) start(context.Context, component.Host) error {
         return err
     }
-    s.mb = metadata.NewMetricsBuilder(s.config.Metrics, metadata.WithStartTime(pdata.Timestamp(bootTime*1e9)))
+    s.mb = metadata.NewMetricsBuilder(s.config.Metrics, metadata.WithStartTime(pcommon.Timestamp(bootTime*1e9)))
     return nil
 }
-func (s *scraper) scrape(_ context.Context) (pdata.Metrics, error) {
+func (s *scraper) scrape(_ context.Context) (pmetric.Metrics, error) {
     var errs scrapererror.ScrapeErrors
     metadata, err := s.getProcessMetadata()
     if err != nil {
         partialErr, isPartial := err.(scrapererror.PartialScrapeError)
         if !isPartial {
-            return pdata.NewMetrics(), err
+            return pmetric.NewMetrics(), err
         }
         errs.AddPartial(partialErr.Failed, partialErr)
     }
     for _, md := range metadata {
-        now := pdata.NewTimestampFromTime(time.Now())
+        now := pcommon.NewTimestampFromTime(time.Now())
         if err = s.scrapeAndAppendCPUTimeMetric(now, md.handle); err != nil {
             errs.AddPartial(cpuMetricsLen, fmt.Errorf("error reading cpu times for process %q (pid %v): %w", md.executable.name, md.pid, err))
@@ -170,7 +171,7 @@ func (s *scraper) getProcessMetadata() ([]*processMetadata, error) {
     return metadata, errs.Combine()
 }
-func (s *scraper) scrapeAndAppendCPUTimeMetric(now pdata.Timestamp, handle processHandle) error {
+func (s *scraper) scrapeAndAppendCPUTimeMetric(now pcommon.Timestamp, handle processHandle) error {
     times, err := handle.Times()
     if err != nil {
         return err
@@ -180,7 +181,7 @@ func (s *scraper) scrapeAndAppendCPUTimeMetric(now pdata.Timestamp, handle proce
     return nil
 }
-func (s *scraper) scrapeAndAppendMemoryUsageMetrics(now pdata.Timestamp, handle processHandle) error {
+func (s *scraper) scrapeAndAppendMemoryUsageMetrics(now pcommon.Timestamp, handle processHandle) error {
     mem, err := handle.MemoryInfo()
     if err != nil {
         return err
@@ -191,7 +192,7 @@ func (s *scraper) scrapeAndAppendMemoryUsageMetrics(now pdata.Timestamp, handle
     return nil
 }
-func (s *scraper) scrapeAndAppendDiskIOMetric(now pdata.Timestamp, handle processHandle) error {
+func (s *scraper) scrapeAndAppendDiskIOMetric(now pcommon.Timestamp, handle processHandle) error {
     io, err := handle.IOCounters()
     if err != nil {
         return err
diff --git a/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_linux.go b/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_linux.go
index 9ca8687dded1..6abca906e703 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_linux.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_linux.go
@@ -19,12 +19,12 @@ package processscraper // import "github.com/open-telemetry/opentelemetry-collec
 import (
     "github.com/shirou/gopsutil/v3/cpu"
-    "go.opentelemetry.io/collector/model/pdata"
+    "go.opentelemetry.io/collector/pdata/pcommon"
     "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/processscraper/internal/metadata"
 )
-func (s *scraper) recordCPUTimeMetric(now pdata.Timestamp, cpuTime *cpu.TimesStat) {
+func (s *scraper) recordCPUTimeMetric(now pcommon.Timestamp, cpuTime *cpu.TimesStat) {
     s.mb.RecordProcessCPUTimeDataPoint(now, cpuTime.User, metadata.AttributeState.User)
     s.mb.RecordProcessCPUTimeDataPoint(now, cpuTime.System, metadata.AttributeState.System)
     s.mb.RecordProcessCPUTimeDataPoint(now, cpuTime.Iowait, metadata.AttributeState.Wait)
diff --git a/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_others.go b/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_others.go
index b22610e19002..a435c8f065c0 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_others.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_others.go
@@ -19,10 +19,10 @@ package processscraper // import "github.com/open-telemetry/opentelemetry-collec
 import (
     "github.com/shirou/gopsutil/v3/cpu"
-    "go.opentelemetry.io/collector/model/pdata"
+    "go.opentelemetry.io/collector/pdata/pcommon"
 )
-func (s *scraper) recordCPUTimeMetric(now pdata.Timestamp, cpuTime *cpu.TimesStat) {}
+func (s *scraper) recordCPUTimeMetric(now pcommon.Timestamp, cpuTime *cpu.TimesStat) {}
 func getProcessExecutable(processHandle) (*executableMetadata, error) {
     return nil, nil
diff --git a/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_test.go b/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_test.go
index 1941f19a82b4..4de1cf8a9b5b 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_test.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_test.go
@@ -27,8 +27,9 @@ import (
     "github.com/stretchr/testify/mock"
     "github.com/stretchr/testify/require"
     "go.opentelemetry.io/collector/component/componenttest"
-    "go.opentelemetry.io/collector/model/pdata"
     conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1"
+    "go.opentelemetry.io/collector/pdata/pcommon"
+    "go.opentelemetry.io/collector/pdata/pmetric"
     "go.opentelemetry.io/collector/receiver/scrapererror"
     "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/processor/filterset"
@@ -77,7 +78,7 @@ func TestScrape(t *testing.T) {
     assertSameTimeStampForAllMetricsWithinResource(t, md.ResourceMetrics())
 }
-func assertProcessResourceAttributesExist(t *testing.T, resourceMetrics pdata.ResourceMetricsSlice) {
+func assertProcessResourceAttributesExist(t *testing.T, resourceMetrics pmetric.ResourceMetricsSlice) {
     for i := 0; i < resourceMetrics.Len(); i++ {
         attr := resourceMetrics.At(0).Resource().Attributes()
         internal.AssertContainsAttribute(t, attr, conventions.AttributeProcessPID)
@@ -89,20 +90,20 @@ func assertProcessResourceAttributesExist(t *testing.T, resourceMetrics pdata.Re
     }
 }
-func assertCPUTimeMetricValid(t *testing.T, resourceMetrics pdata.ResourceMetricsSlice, startTime pdata.Timestamp) {
+func assertCPUTimeMetricValid(t *testing.T, resourceMetrics pmetric.ResourceMetricsSlice, startTime pcommon.Timestamp) {
     cpuTimeMetric := getMetric(t, "process.cpu.time", resourceMetrics)
     assert.Equal(t, "process.cpu.time", cpuTimeMetric.Name())
     if startTime != 0 {
         internal.AssertSumMetricStartTimeEquals(t, cpuTimeMetric, startTime)
     }
-    internal.AssertSumMetricHasAttributeValue(t, cpuTimeMetric, 0, "state", pdata.NewValueString(metadata.AttributeState.User))
-    internal.AssertSumMetricHasAttributeValue(t, cpuTimeMetric, 1, "state", pdata.NewValueString(metadata.AttributeState.System))
+    internal.AssertSumMetricHasAttributeValue(t, cpuTimeMetric, 0, "state", pcommon.NewValueString(metadata.AttributeState.User))
+    internal.AssertSumMetricHasAttributeValue(t, cpuTimeMetric, 1, "state", pcommon.NewValueString(metadata.AttributeState.System))
     if runtime.GOOS == "linux" {
-        internal.AssertSumMetricHasAttributeValue(t, cpuTimeMetric, 2, "state", pdata.NewValueString(metadata.AttributeState.Wait))
+        internal.AssertSumMetricHasAttributeValue(t, cpuTimeMetric, 2, "state", pcommon.NewValueString(metadata.AttributeState.Wait))
     }
 }
-func assertMemoryUsageMetricValid(t *testing.T, resourceMetrics pdata.ResourceMetricsSlice, startTime pdata.Timestamp) {
+func assertMemoryUsageMetricValid(t *testing.T, resourceMetrics pmetric.ResourceMetricsSlice, startTime pcommon.Timestamp) {
     physicalMemUsageMetric := getMetric(t, "process.memory.physical_usage", resourceMetrics)
     assert.Equal(t, "process.memory.physical_usage", physicalMemUsageMetric.Name())
     virtualMemUsageMetric := getMetric(t, "process.memory.virtual_usage", resourceMetrics)
@@ -114,17 +115,17 @@ func assertMemoryUsageMetricValid(t *testing.T, resourceMetrics pdata.ResourceMe
     }
 }
-func assertDiskIOMetricValid(t *testing.T, resourceMetrics pdata.ResourceMetricsSlice, startTime pdata.Timestamp) {
+func assertDiskIOMetricValid(t *testing.T, resourceMetrics pmetric.ResourceMetricsSlice, startTime pcommon.Timestamp) {
     diskIOMetric := getMetric(t, "process.disk.io", resourceMetrics)
     assert.Equal(t, "process.disk.io", diskIOMetric.Name())
     if startTime != 0 {
         internal.AssertSumMetricStartTimeEquals(t, diskIOMetric, startTime)
     }
-    internal.AssertSumMetricHasAttributeValue(t, diskIOMetric, 0, "direction", pdata.NewValueString(metadata.AttributeDirection.Read))
-    internal.AssertSumMetricHasAttributeValue(t, diskIOMetric, 1, "direction", pdata.NewValueString(metadata.AttributeDirection.Write))
+    internal.AssertSumMetricHasAttributeValue(t, diskIOMetric, 0, "direction", pcommon.NewValueString(metadata.AttributeDirection.Read))
+    internal.AssertSumMetricHasAttributeValue(t, diskIOMetric, 1, "direction", pcommon.NewValueString(metadata.AttributeDirection.Write))
 }
-func assertSameTimeStampForAllMetricsWithinResource(t *testing.T, resourceMetrics pdata.ResourceMetricsSlice) {
+func assertSameTimeStampForAllMetricsWithinResource(t *testing.T, resourceMetrics pmetric.ResourceMetricsSlice) {
     for i := 0; i < resourceMetrics.Len(); i++ {
         ilms := resourceMetrics.At(i).ScopeMetrics()
         for j := 0; j < ilms.Len(); j++ {
@@ -133,7 +134,7 @@ func assertSameTimeStampForAllMetricsWithinResource(t *testing.T, resourceMetric
     }
 }
-func getMetric(t *testing.T, expectedMetricName string, rms pdata.ResourceMetricsSlice) pdata.Metric {
+func getMetric(t *testing.T, expectedMetricName string, rms pmetric.ResourceMetricsSlice) pmetric.Metric {
     for i := 0; i < rms.Len(); i++ {
         metrics := getMetricSlice(t, rms.At(i))
         for j := 0; j < metrics.Len(); j++ {
@@ -145,10 +146,10 @@ func getMetric(t *testing.T, expectedMetricName string, rms pdata.ResourceMetric
     }
     require.Fail(t, fmt.Sprintf("no metric with name %s was returned", expectedMetricName))
-    return pdata.NewMetric()
+    return pmetric.NewMetric()
 }
-func getMetricSlice(t *testing.T, rm pdata.ResourceMetrics) pdata.MetricSlice {
+func getMetricSlice(t *testing.T, rm pmetric.ResourceMetrics) pmetric.MetricSlice {
     ilms := rm.ScopeMetrics()
     require.Equal(t, 1, ilms.Len())
     return ilms.At(0).Metrics()
diff --git a/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_windows.go b/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_windows.go
index b221d44bbdfc..6f46b0d1bfa6 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_windows.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_windows.go
@@ -22,12 +22,12 @@ import (
     "regexp"
     "github.com/shirou/gopsutil/v3/cpu"
-    "go.opentelemetry.io/collector/model/pdata"
+    "go.opentelemetry.io/collector/pdata/pcommon"
     "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/processscraper/internal/metadata"
 )
-func (s *scraper) recordCPUTimeMetric(now pdata.Timestamp, cpuTime *cpu.TimesStat) {
+func (s *scraper) recordCPUTimeMetric(now pcommon.Timestamp, cpuTime *cpu.TimesStat) {
     s.mb.RecordProcessCPUTimeDataPoint(now, cpuTime.User, metadata.AttributeState.User)
     s.mb.RecordProcessCPUTimeDataPoint(now, cpuTime.System, metadata.AttributeState.System)
 }
diff --git a/receiver/hostmetricsreceiver/internal/testutils.go b/receiver/hostmetricsreceiver/internal/testutils.go
index 3cdc6e293a4e..369d2736bbc8 100644
--- a/receiver/hostmetricsreceiver/internal/testutils.go
+++ b/receiver/hostmetricsreceiver/internal/testutils.go
@@ -19,65 +19,66 @@ import (
     "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/require"
-    "go.opentelemetry.io/collector/model/pdata"
+    "go.opentelemetry.io/collector/pdata/pcommon"
+    "go.opentelemetry.io/collector/pdata/pmetric"
 )
-func AssertContainsAttribute(t *testing.T, attr pdata.Map, key string) {
+func AssertContainsAttribute(t *testing.T, attr pcommon.Map, key string) {
     _, ok := attr.Get(key)
     assert.True(t, ok)
 }
-func AssertDescriptorEqual(t *testing.T, expected pdata.Metric, actual pdata.Metric) {
+func AssertDescriptorEqual(t *testing.T, expected pmetric.Metric, actual pmetric.Metric) {
     assert.Equal(t, expected.Name(), actual.Name())
     assert.Equal(t, expected.Description(), actual.Description())
     assert.Equal(t, expected.Unit(), actual.Unit())
     assert.Equal(t, expected.DataType(), actual.DataType())
 }
-func AssertSumMetricHasAttributeValue(t *testing.T, metric pdata.Metric, index int, labelName string, expectedVal pdata.Value) {
+func AssertSumMetricHasAttributeValue(t *testing.T, metric pmetric.Metric, index int, labelName string, expectedVal pcommon.Value) {
     val, ok := metric.Sum().DataPoints().At(index).Attributes().Get(labelName)
     assert.Truef(t, ok, "Missing attribute %q in metric %q", labelName, metric.Name())
     assert.Equal(t, expectedVal, val)
 }
-func AssertSumMetricHasAttribute(t *testing.T, metric pdata.Metric, index int, labelName string) {
+func AssertSumMetricHasAttribute(t *testing.T, metric pmetric.Metric, index int, labelName string) {
     _, ok := metric.Sum().DataPoints().At(index).Attributes().Get(labelName)
     assert.Truef(t, ok, "Missing attribute %q in metric %q", labelName, metric.Name())
 }
-func AssertSumMetricStartTimeEquals(t *testing.T, metric pdata.Metric, startTime pdata.Timestamp) {
+func AssertSumMetricStartTimeEquals(t *testing.T, metric pmetric.Metric, startTime pcommon.Timestamp) {
     ddps := metric.Sum().DataPoints()
     for i := 0; i < ddps.Len(); i++ {
         require.Equal(t, startTime, ddps.At(i).StartTimestamp())
     }
 }
-func AssertGaugeMetricHasAttributeValue(t *testing.T, metric pdata.Metric, index int, labelName string, expectedVal pdata.Value) {
+func AssertGaugeMetricHasAttributeValue(t *testing.T, metric pmetric.Metric, index int, labelName string, expectedVal pcommon.Value) {
     val, ok := metric.Gauge().DataPoints().At(index).Attributes().Get(labelName)
     assert.Truef(t, ok, "Missing attribute %q in metric %q", labelName, metric.Name())
     assert.Equal(t, expectedVal, val)
 }
-func AssertGaugeMetricHasAttribute(t *testing.T, metric pdata.Metric, index int, labelName string) {
+func AssertGaugeMetricHasAttribute(t *testing.T, metric pmetric.Metric, index int, labelName string) {
     _, ok := metric.Gauge().DataPoints().At(index).Attributes().Get(labelName)
     assert.Truef(t, ok, "Missing attribute %q in metric %q", labelName, metric.Name())
 }
-func AssertGaugeMetricStartTimeEquals(t *testing.T, metric pdata.Metric, startTime pdata.Timestamp) {
+func AssertGaugeMetricStartTimeEquals(t *testing.T, metric pmetric.Metric, startTime pcommon.Timestamp) {
     ddps := metric.Gauge().DataPoints()
     for i := 0; i < ddps.Len(); i++ {
         require.Equal(t, startTime, ddps.At(i).StartTimestamp())
     }
 }
-func AssertSameTimeStampForAllMetrics(t *testing.T, metrics pdata.MetricSlice) {
+func AssertSameTimeStampForAllMetrics(t *testing.T, metrics pmetric.MetricSlice) {
     AssertSameTimeStampForMetrics(t, metrics, 0, metrics.Len())
 }
-func AssertSameTimeStampForMetrics(t *testing.T, metrics pdata.MetricSlice, startIdx, endIdx int) {
-    var ts pdata.Timestamp
+func AssertSameTimeStampForMetrics(t *testing.T, metrics pmetric.MetricSlice, startIdx, endIdx int) {
+    var ts pcommon.Timestamp
     for i := startIdx; i < endIdx; i++ {
         metric := metrics.At(i)
-        if metric.DataType() == pdata.MetricDataTypeSum {
+        if metric.DataType() == pmetric.MetricDataTypeSum {
             ddps := metric.Sum().DataPoints()
             for j := 0; j < ddps.Len(); j++ {
                 if ts == 0 {
diff --git a/receiver/influxdbreceiver/go.mod b/receiver/influxdbreceiver/go.mod
index fa6d5b43da4e..8146c5df2f75 100644
--- a/receiver/influxdbreceiver/go.mod
+++ b/receiver/influxdbreceiver/go.mod
@@ -7,7 +7,7 @@ require (
     github.com/influxdata/influxdb-observability/influx2otel v0.2.17
     github.com/influxdata/line-protocol/v2 v2.2.1
     github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.48.0
-    go.opentelemetry.io/collector v0.48.0
+    go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d
     go.uber.org/zap v1.21.0
 )
@@ -22,22 +22,26 @@ require (
     github.com/golang/protobuf v1.5.2 // indirect
     github.com/golang/snappy v0.0.4 // indirect
     github.com/klauspost/compress v1.15.1 // indirect
-    github.com/knadh/koanf v1.4.0 // indirect
+    github.com/knadh/koanf v1.4.1 // indirect
     github.com/mitchellh/copystructure v1.2.0 // indirect
     github.com/mitchellh/mapstructure v1.4.3 // indirect
     github.com/mitchellh/reflectwalk v1.0.2 // indirect
     github.com/pelletier/go-toml v1.9.4 // indirect
     github.com/rs/cors v1.8.2 // indirect
-    github.com/spf13/cast v1.4.1 // indirect
-    go.opentelemetry.io/collector/model v0.48.0 // indirect
+    go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d // indirect
+    go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 // indirect
     go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 // indirect
     go.opentelemetry.io/otel v1.6.3 // indirect
     go.opentelemetry.io/otel/metric v0.29.0 // indirect
     go.opentelemetry.io/otel/trace v1.6.3 // indirect
     go.uber.org/atomic v1.9.0 // indirect
     go.uber.org/multierr v1.8.0 // indirect
+    golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect
+    golang.org/x/text v0.3.7 // indirect
     google.golang.org/grpc v1.45.0 // indirect
     google.golang.org/protobuf v1.28.0 // indirect
 )
 replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/common => ../../internal/common
+
+replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d
diff --git a/receiver/influxdbreceiver/go.sum b/receiver/influxdbreceiver/go.sum
index ade1c19e5f8f..54c47822a873 100644
--- a/receiver/influxdbreceiver/go.sum
+++ b/receiver/influxdbreceiver/go.sum
@@ -132,8 +132,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
 github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A=
 github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw=
-github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs=
+github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ=
+github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
 github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
 github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
@@ -181,8 +181,6 @@ github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U=
 github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
 github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
 github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
-github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA=
-github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
 github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
 github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@@ -195,10 +193,13 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
-go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs=
-go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU=
-go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw=
+go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU=
+go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI=
 go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU=
+go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA=
+go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes=
+go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k=
+go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY=
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY=
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0/go.mod h1:PFmBsWbldL1kiWZk9+0LBZz2brhByaGsvp6pRICMlPE=
 go.opentelemetry.io/otel v1.6.0/go.mod h1:bfJD2DZVw0LBxghOTlgnlI0CV3hLDu9XF/QKOUXMTQQ=
@@ -245,7 +246,8 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL
 golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
 golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
-golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c=
+golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc=
+golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -270,12 +272,13 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0=
+golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -298,7 +301,6 @@ google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRn
 google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
 google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
 google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0=
 google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
diff --git a/receiver/jaegerreceiver/go.mod b/receiver/jaegerreceiver/go.mod
index 548f5106c118..203f2b5806a6 100644
--- a/receiver/jaegerreceiver/go.mod
+++ b/receiver/jaegerreceiver/go.mod
@@ -10,8 +10,9 @@ require (
     github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.48.0
     github.com/stretchr/testify v1.7.1
     github.com/uber/jaeger-lib v2.4.1+incompatible
-    go.opentelemetry.io/collector v0.48.0
-    go.opentelemetry.io/collector/model v0.48.0
+    go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d
+    go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d
+    go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000
     go.uber.org/multierr v1.8.0
     go.uber.org/zap v1.21.0
     google.golang.org/grpc v1.45.0
@@ -29,7 +30,7 @@ require (
     github.com/golang/snappy v0.0.4 // indirect
     github.com/hashicorp/hcl v1.0.0 // indirect
     github.com/klauspost/compress v1.15.1 // indirect
-    github.com/knadh/koanf v1.4.0 // indirect
+    github.com/knadh/koanf v1.4.1 // indirect
     github.com/magiconair/properties v1.8.6 // indirect
     github.com/mitchellh/copystructure v1.2.0 // indirect
     github.com/mitchellh/mapstructure v1.4.3 // indirect
@@ -54,8 +55,8 @@ require (
     go.opentelemetry.io/otel/metric v0.29.0 // indirect
     go.opentelemetry.io/otel/trace v1.6.3 // indirect
     go.uber.org/atomic v1.9.0 // indirect
-    golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect
-    golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect
+    golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect
+    golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 // indirect
     golang.org/x/text v0.3.7 // indirect
     google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect
     google.golang.org/protobuf v1.28.0 // indirect
@@ -69,3 +70,5 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/commo
 replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal => ../../internal/coreinternal
 replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger => ../../pkg/translator/jaeger
+
+replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d
diff --git a/receiver/jaegerreceiver/go.sum b/receiver/jaegerreceiver/go.sum
index bfd9232b6aaf..0a7e1b6eaa33 100644
--- a/receiver/jaegerreceiver/go.sum
+++ b/receiver/jaegerreceiver/go.sum
@@ -25,7 +25,7 @@ github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZx
 github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A=
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
-github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo=
+github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
 github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
 github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
@@ -148,8 +148,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
 github.com/klauspost/compress v1.14.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
 github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A=
 github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw=
-github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs=
+github.com/knadh/koanf v1.4.1
h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= @@ -204,7 +204,7 @@ github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndr github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= -github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= +github.com/prometheus/common v0.33.0 h1:rHgav/0a6+uYgGdNt3jwz8FNSesO/Hsang3O0T9A5SE= github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= github.com/rhnvrm/simples3 v0.6.1/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= @@ -248,10 +248,12 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0/go.mod h1:SY9qHHUES6W3oZnO1H2W8NvsSovIoXRg/A1AH9px8+I= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= @@ -263,7 +265,7 @@ go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1 go.opentelemetry.io/otel/metric v0.28.0/go.mod h1:TrzsfQAmQaB1PDcdhBauLMk7nyyg9hm+GoQq/ekE9Iw= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod 
h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.0/go.mod h1:qs7BrU5cZ8dXQHBGxHMOxwME/27YH2qEp4/+tZLLwJE= go.opentelemetry.io/otel/trace v1.6.1/go.mod h1:RkFRM1m0puWIq10oxImnGEduNBzxiN7TXluRBtE+5j0= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= @@ -304,8 +306,8 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 h1:RerP+noqYHUQ8CMRcPlC2nvTa4dcBIjegkuWdcUDuqg= @@ -332,8 +334,8 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= diff --git a/receiver/jaegerreceiver/jaeger_agent_test.go b/receiver/jaegerreceiver/jaeger_agent_test.go index d822e4896a0f..5a18d1bf6ffa 100644 --- a/receiver/jaegerreceiver/jaeger_agent_test.go +++ b/receiver/jaegerreceiver/jaeger_agent_test.go @@ -36,8 +36,9 @@ import ( "go.opentelemetry.io/collector/config/configgrpc" "go.opentelemetry.io/collector/config/configtls" "go.opentelemetry.io/collector/consumer/consumertest" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "google.golang.org/grpc" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/testutil" @@ -238,13 
+239,13 @@ func newClientUDP(hostPort string, binary bool) (*agent.AgentClient, error) { } // Cannot use the testdata because timestamps are nanoseconds. -func generateTraceData() pdata.Traces { - td := pdata.NewTraces() +func generateTraceData() ptrace.Traces { + td := ptrace.NewTraces() rs := td.ResourceSpans().AppendEmpty() rs.Resource().Attributes().UpsertString(conventions.AttributeServiceName, "test") span := rs.ScopeSpans().AppendEmpty().Spans().AppendEmpty() - span.SetSpanID(pdata.NewSpanID([8]byte{0, 1, 2, 3, 4, 5, 6, 7})) - span.SetTraceID(pdata.NewTraceID([16]byte{0, 1, 2, 3, 4, 5, 6, 7, 7, 6, 5, 4, 3, 2, 1, 0})) + span.SetSpanID(pcommon.NewSpanID([8]byte{0, 1, 2, 3, 4, 5, 6, 7})) + span.SetTraceID(pcommon.NewTraceID([16]byte{0, 1, 2, 3, 4, 5, 6, 7, 7, 6, 5, 4, 3, 2, 1, 0})) span.SetStartTimestamp(1581452772000000000) span.SetEndTimestamp(1581452773000000000) return td diff --git a/receiver/jaegerreceiver/trace_receiver_test.go b/receiver/jaegerreceiver/trace_receiver_test.go index a7bdfd7295a5..043334ed09ba 100644 --- a/receiver/jaegerreceiver/trace_receiver_test.go +++ b/receiver/jaegerreceiver/trace_receiver_test.go @@ -43,8 +43,9 @@ import ( "go.opentelemetry.io/collector/config/confighttp" "go.opentelemetry.io/collector/config/configtls" "go.opentelemetry.io/collector/consumer/consumertest" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/credentials" @@ -246,13 +247,13 @@ func TestGRPCReceptionWithTLS(t *testing.T) { assert.EqualValues(t, want, gotTraces[0]) } -func expectedTraceData(t1, t2, t3 time.Time) pdata.Traces { - traceID := pdata.NewTraceID( +func expectedTraceData(t1, t2, t3 time.Time) ptrace.Traces { + traceID := pcommon.NewTraceID( [16]byte{0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF, 0x80}) - parentSpanID := pdata.NewSpanID([8]byte{0x1F, 0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18}) - childSpanID := pdata.NewSpanID([8]byte{0xAF, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9, 0xA8}) + parentSpanID := pcommon.NewSpanID([8]byte{0x1F, 0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18}) + childSpanID := pcommon.NewSpanID([8]byte{0xAF, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9, 0xA8}) - traces := pdata.NewTraces() + traces := ptrace.NewTraces() rs := traces.ResourceSpans().AppendEmpty() rs.Resource().Attributes().InsertString(conventions.AttributeServiceName, "issaTest") rs.Resource().Attributes().InsertBool("bool", true) @@ -265,18 +266,18 @@ func expectedTraceData(t1, t2, t3 time.Time) pdata.Traces { span0.SetParentSpanID(parentSpanID) span0.SetTraceID(traceID) span0.SetName("DBSearch") - span0.SetStartTimestamp(pdata.NewTimestampFromTime(t1)) - span0.SetEndTimestamp(pdata.NewTimestampFromTime(t2)) - span0.Status().SetCode(pdata.StatusCodeError) + span0.SetStartTimestamp(pcommon.NewTimestampFromTime(t1)) + span0.SetEndTimestamp(pcommon.NewTimestampFromTime(t2)) + span0.Status().SetCode(ptrace.StatusCodeError) span0.Status().SetMessage("Stale indices") span1 := spans.AppendEmpty() span1.SetSpanID(parentSpanID) span1.SetTraceID(traceID) span1.SetName("ProxyFetch") - span1.SetStartTimestamp(pdata.NewTimestampFromTime(t2)) - span1.SetEndTimestamp(pdata.NewTimestampFromTime(t3)) - span1.Status().SetCode(pdata.StatusCodeError) + span1.SetStartTimestamp(pcommon.NewTimestampFromTime(t2)) + 
span1.SetEndTimestamp(pcommon.NewTimestampFromTime(t3)) + span1.Status().SetCode(ptrace.StatusCodeError) span1.Status().SetMessage("Frontend crash") return traces @@ -307,7 +308,7 @@ func grpcFixture(t1 time.Time, d1, d2 time.Duration) *api_v2.PostSpansRequest { Duration: d1, Tags: []model.KeyValue{ model.String(conventions.OtelStatusDescription, "Stale indices"), - model.Int64(conventions.OtelStatusCode, int64(pdata.StatusCodeError)), + model.Int64(conventions.OtelStatusCode, int64(ptrace.StatusCodeError)), model.Bool("error", true), }, References: []model.SpanRef{ @@ -326,7 +327,7 @@ func grpcFixture(t1 time.Time, d1, d2 time.Duration) *api_v2.PostSpansRequest { Duration: d2, Tags: []model.KeyValue{ model.String(conventions.OtelStatusDescription, "Frontend crash"), - model.Int64(conventions.OtelStatusCode, int64(pdata.StatusCodeError)), + model.Int64(conventions.OtelStatusCode, int64(ptrace.StatusCodeError)), model.Bool("error", true), }, }, diff --git a/receiver/jmxreceiver/go.mod b/receiver/jmxreceiver/go.mod index 1e508b65f32e..6ef782294946 100644 --- a/receiver/jmxreceiver/go.mod +++ b/receiver/jmxreceiver/go.mod @@ -7,8 +7,8 @@ require ( github.com/shirou/gopsutil/v3 v3.22.3 github.com/stretchr/testify v1.7.1 github.com/testcontainers/testcontainers-go v0.13.0 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/atomic v1.9.0 go.uber.org/zap v1.21.0 @@ -19,7 +19,7 @@ require ( github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect github.com/Microsoft/go-winio v0.4.17 // indirect github.com/Microsoft/hcsshim v0.8.23 // indirect - github.com/cenkalti/backoff/v4 v4.1.2 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/containerd/cgroups v1.0.1 // indirect github.com/containerd/containerd v1.5.9 // indirect github.com/davecgh/go-spew v1.1.1 // indirect @@ -39,7 +39,7 @@ require ( github.com/google/uuid v1.3.0 // indirect github.com/gorilla/mux v1.8.0 // indirect github.com/klauspost/compress v1.15.1 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect github.com/magiconair/properties v1.8.6 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect @@ -59,18 +59,18 @@ require ( github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/rs/cors v1.8.2 // indirect github.com/sirupsen/logrus v1.8.1 // indirect - github.com/spf13/cast v1.4.1 // indirect github.com/tklauser/go-sysconf v0.3.10 // indirect github.com/tklauser/numcpus v0.4.0 // indirect github.com/yusufpapurcu/wmi v1.2.2 // indirect go.opencensus.io v0.23.0 // indirect + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/multierr v1.8.0 // indirect - golang.org/x/net 
v0.0.0-20220127200216-cd36cc0744dd // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect golang.org/x/sys v0.0.0-20220209214540-3681064d5158 // indirect golang.org/x/text v0.3.7 // indirect @@ -83,3 +83,5 @@ require ( ) replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/common => ../../internal/common + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/receiver/jmxreceiver/go.sum b/receiver/jmxreceiver/go.sum index c8385b79bbf6..39aca9d3ec35 100644 --- a/receiver/jmxreceiver/go.sum +++ b/receiver/jmxreceiver/go.sum @@ -131,8 +131,9 @@ github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8n github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -533,8 +534,8 @@ github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdY github.com/klauspost/compress v1.14.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -743,8 +744,6 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasO github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod 
h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= @@ -823,10 +822,12 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0/go.mod h1:SY9qHHUES6W3oZnO1H2W8NvsSovIoXRg/A1AH9px8+I= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= @@ -838,7 +839,7 @@ go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1 go.opentelemetry.io/otel/metric v0.28.0/go.mod h1:TrzsfQAmQaB1PDcdhBauLMk7nyyg9hm+GoQq/ekE9Iw= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.0/go.mod h1:qs7BrU5cZ8dXQHBGxHMOxwME/27YH2qEp4/+tZLLwJE= go.opentelemetry.io/otel/trace v1.6.1/go.mod h1:RkFRM1m0puWIq10oxImnGEduNBzxiN7TXluRBtE+5j0= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= @@ -954,8 +955,9 @@ golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLd golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211108170745-6635138e15ea/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= 
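// Not part of this PR: a minimal sketch of the trace-side migration applied in
// the jaeger receiver test hunks above. Span construction moves from the single
// pdata package to ptrace (trace structures, status codes) plus pcommon (IDs,
// timestamps). Every call below appears verbatim in this diff; only the package
// name, function name, and attribute values are illustrative.
package migrationsketch

import (
	"time"

	conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1"
	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/ptrace"
)

// newErrorTrace builds a single-span ptrace.Traces with an error status.
func newErrorTrace(start, end time.Time) ptrace.Traces {
	td := ptrace.NewTraces()
	rs := td.ResourceSpans().AppendEmpty()
	rs.Resource().Attributes().UpsertString(conventions.AttributeServiceName, "sketch")
	span := rs.ScopeSpans().AppendEmpty().Spans().AppendEmpty()
	span.SetName("sketchSpan")
	span.SetTraceID(pcommon.NewTraceID([16]byte{0, 1, 2, 3, 4, 5, 6, 7, 7, 6, 5, 4, 3, 2, 1, 0}))
	span.SetSpanID(pcommon.NewSpanID([8]byte{0, 1, 2, 3, 4, 5, 6, 7}))
	span.SetStartTimestamp(pcommon.NewTimestampFromTime(start))
	span.SetEndTimestamp(pcommon.NewTimestampFromTime(end))
	span.Status().SetCode(ptrace.StatusCodeError)
	span.Status().SetMessage("sketch error")
	return td
}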
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= diff --git a/receiver/jmxreceiver/integration_test.go b/receiver/jmxreceiver/integration_test.go index 185fc9f0b9c7..05b9639209c9 100644 --- a/receiver/jmxreceiver/integration_test.go +++ b/receiver/jmxreceiver/integration_test.go @@ -35,7 +35,7 @@ import ( "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/consumer/consumertest" "go.opentelemetry.io/collector/exporter/exporterhelper" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" "go.uber.org/zap/zaptest/observer" ) @@ -232,7 +232,7 @@ func (suite *JMXIntegrationSuite) TestJMXReceiverHappyPath() { require.Equal(t, "By", met.Unit()) // otel-java only uses int sum w/ non-monotonic for up down counters instead of gauge - require.Equal(t, pdata.MetricDataTypeSum, met.DataType()) + require.Equal(t, pmetric.MetricDataTypeSum, met.DataType()) sum := met.Sum() require.False(t, sum.IsMonotonic()) diff --git a/receiver/journaldreceiver/go.mod b/receiver/journaldreceiver/go.mod index cf2d50adb81a..bbff6684d706 100644 --- a/receiver/journaldreceiver/go.mod +++ b/receiver/journaldreceiver/go.mod @@ -6,7 +6,7 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/stanza v0.48.0 github.com/open-telemetry/opentelemetry-log-collection v0.29.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d gopkg.in/yaml.v2 v2.4.0 ) @@ -15,7 +15,7 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect @@ -24,9 +24,8 @@ require ( github.com/observiq/ctimefmt v1.0.0 // indirect github.com/pelletier/go-toml v1.9.4 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opencensus.io v0.23.0 // indirect - go.opentelemetry.io/collector/model v0.48.0 // indirect + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect @@ -41,3 +40,5 @@ require ( replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/stanza => ../../internal/stanza replace github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage => ../../extension/storage + +replace go.opentelemetry.io/collector/pdata => 
go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/receiver/journaldreceiver/go.sum b/receiver/journaldreceiver/go.sum index fb22fe74bab2..d5e5d6470709 100644 --- a/receiver/journaldreceiver/go.sum +++ b/receiver/journaldreceiver/go.sum @@ -19,7 +19,7 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= @@ -76,7 +76,6 @@ github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -108,8 +107,8 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -168,8 +167,6 @@ github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/sanity-io/litter v1.2.0/go.mod h1:JF6pZUFgu2Q0sBZ+HSV35P8TVPI1TTzEwyu9FXAw2W4= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod 
h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -188,17 +185,17 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1 go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= diff --git a/receiver/k8sclusterreceiver/go.mod b/receiver/k8sclusterreceiver/go.mod index e9d5dfd5dbf5..445bb4b8130d 100644 --- a/receiver/k8sclusterreceiver/go.mod +++ b/receiver/k8sclusterreceiver/go.mod @@ -12,8 +12,9 @@ require ( github.com/openshift/api v0.0.0-20210521075222-e273a339932a github.com/openshift/client-go v0.0.0-20210521082421-73d9475a9142 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/atomic v1.9.0 go.uber.org/zap v1.21.0 google.golang.org/protobuf v1.28.0 @@ -36,7 +37,7 @@ require ( github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect github.com/imdario/mergo v0.3.12 // indirect github.com/json-iterator/go v1.1.12 // 
indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect @@ -47,18 +48,17 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.48.0 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spf13/cast v1.4.1 // indirect github.com/spf13/pflag v1.0.5 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect - go.opentelemetry.io/otel/sdk v1.6.1 // indirect + go.opentelemetry.io/otel/sdk v1.6.3 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/multierr v1.8.0 // indirect - golang.org/x/net v0.0.0-20211209124913-491a49abca63 // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect - golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b // indirect + golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 // indirect + golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect golang.org/x/text v0.3.7 // indirect golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect google.golang.org/appengine v1.6.7 // indirect @@ -84,3 +84,5 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experiment replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus => ../../pkg/translator/opencensus replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal => ../../internal/coreinternal + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/receiver/k8sclusterreceiver/go.sum b/receiver/k8sclusterreceiver/go.sum index ca422e84dee7..6ebabff7b263 100644 --- a/receiver/k8sclusterreceiver/go.sum +++ b/receiver/k8sclusterreceiver/go.sum @@ -69,7 +69,7 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.3.0 h1:t/LhUZLVitR1Ow2YOnduCsavhwFUklBMoGVYUCqmCqk= github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -220,7 +220,6 @@ github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3i github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw= 
github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= @@ -269,8 +268,8 @@ github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/X github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= @@ -354,8 +353,6 @@ github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFo github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= @@ -383,20 +380,20 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model 
v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= -go.opentelemetry.io/otel v1.6.1/go.mod h1:blzUabWHkX6LJewxvadmzafgh/wnvBSDBdOuwkAtrWQ= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= -go.opentelemetry.io/otel/sdk v1.6.1/go.mod h1:IVYrddmFZ+eJqu2k38qD3WezFR2pymCzm8tdxyh3R4E= -go.opentelemetry.io/otel/trace v1.6.1/go.mod h1:RkFRM1m0puWIq10oxImnGEduNBzxiN7TXluRBtE+5j0= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= +go.opentelemetry.io/otel/sdk v1.6.3/go.mod h1:A4iWF7HTXa+GWL/AaqESz28VuSBIcZ+0CV+IzJ5NMiQ= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= @@ -494,8 +491,9 @@ golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20211209124913-491a49abca63 h1:iocB37TsdFuN6IBRZ+ry36wrkoV51/tl5vOWqkcPGvY= golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -573,13 +571,14 @@ golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
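// Not part of this PR: a minimal sketch of the metrics-side migration applied
// in this diff (the coreinternal timestamp assertion, the jmx integration
// test, and the k8scluster metrics store just below). Metric structures move
// to pmetric while shared value types such as Timestamp move to pcommon; the
// package and function names here are illustrative.
package migrationsketch

import (
	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"
)

// sumTimestampsEqual reports whether every Sum data point in the slice shares
// one timestamp, mirroring AssertSameTimeStampForMetrics after the split.
func sumTimestampsEqual(metrics pmetric.MetricSlice) bool {
	var ts pcommon.Timestamp
	for i := 0; i < metrics.Len(); i++ {
		metric := metrics.At(i)
		if metric.DataType() != pmetric.MetricDataTypeSum {
			continue
		}
		dps := metric.Sum().DataPoints()
		for j := 0; j < dps.Len(); j++ {
			if ts == 0 {
				ts = dps.At(j).Timestamp()
			} else if dps.At(j).Timestamp() != ts {
				return false
			}
		}
	}
	return true
}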
+golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b h1:9zKuko04nR4gjZ4+DNjHqRlAJqbJETHwiNKDqTfOjfE= golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/receiver/k8sclusterreceiver/internal/collection/collector.go b/receiver/k8sclusterreceiver/internal/collection/collector.go index 14de5ec63b46..588761fe319c 100644 --- a/receiver/k8sclusterreceiver/internal/collection/collector.go +++ b/receiver/k8sclusterreceiver/internal/collection/collector.go @@ -20,7 +20,7 @@ import ( agentmetricspb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1" quotav1 "github.com/openshift/api/quota/v1" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/service/featuregate" "go.uber.org/zap" appsv1 "k8s.io/api/apps/v1" @@ -145,7 +145,7 @@ func (dc *DataCollector) UpdateMetricsStore(obj interface{}, rm []*resourceMetri } } -func (dc *DataCollector) CollectMetricData(currentTime time.Time) pdata.Metrics { +func (dc *DataCollector) CollectMetricData(currentTime time.Time) pmetric.Metrics { return dc.metricsStore.getMetricData(currentTime) } diff --git a/receiver/k8sclusterreceiver/internal/collection/metricsstore.go b/receiver/k8sclusterreceiver/internal/collection/metricsstore.go index d99cde3b622c..231553095edc 100644 --- a/receiver/k8sclusterreceiver/internal/collection/metricsstore.go +++ b/receiver/k8sclusterreceiver/internal/collection/metricsstore.go @@ -21,7 +21,7 @@ import ( agentmetricspb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1" metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1" resourcepb "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "google.golang.org/protobuf/types/known/timestamppb" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" @@ -83,11 +83,11 @@ func (ms *metricsStore) remove(obj runtime.Object) error { } // getMetricData returns metricsCache stored in the cache at a given point in time. 
-func (ms *metricsStore) getMetricData(currentTime time.Time) pdata.Metrics { +func (ms *metricsStore) getMetricData(currentTime time.Time) pmetric.Metrics { ms.RLock() defer ms.RUnlock() - out := pdata.NewMetrics() + out := pmetric.NewMetrics() for _, mds := range ms.metricsCache { for i := range mds { // Set datapoint timestamp to be time of retrieval from cache. diff --git a/receiver/k8seventsreceiver/go.mod b/receiver/k8seventsreceiver/go.mod index be223ed52b8c..8b6ae98040ed 100644 --- a/receiver/k8seventsreceiver/go.mod +++ b/receiver/k8seventsreceiver/go.mod @@ -5,8 +5,9 @@ go 1.17 require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.48.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/zap v1.21.0 k8s.io/api v0.23.5 k8s.io/apimachinery v0.23.5 @@ -25,7 +26,7 @@ require ( github.com/googleapis/gnostic v0.5.5 // indirect github.com/imdario/mergo v0.3.12 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/pretty v0.3.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect @@ -37,7 +38,6 @@ require ( github.com/pelletier/go-toml v1.9.4 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spf13/cast v1.4.1 // indirect github.com/spf13/pflag v1.0.5 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect @@ -45,10 +45,10 @@ require ( go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect - golang.org/x/net v0.0.0-20211209124913-491a49abca63 // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect - golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b // indirect + golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 // indirect + golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect golang.org/x/text v0.3.7 // indirect golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect google.golang.org/appengine v1.6.7 // indirect @@ -66,3 +66,5 @@ require ( ) replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig => ../../internal/k8sconfig + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/receiver/k8seventsreceiver/go.sum b/receiver/k8seventsreceiver/go.sum index 97f9bfe4f95e..d856a7eb0380 100644 --- a/receiver/k8seventsreceiver/go.sum +++ b/receiver/k8seventsreceiver/go.sum @@ -67,7 +67,7 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod 
h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= @@ -208,7 +208,6 @@ github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3i github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw= github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= @@ -253,8 +252,8 @@ github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/X github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= @@ -338,8 +337,6 @@ github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= @@ -367,17 +364,19 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 
h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -473,8 +472,9 @@ golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20211209124913-491a49abca63 h1:iocB37TsdFuN6IBRZ+ry36wrkoV51/tl5vOWqkcPGvY= golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -552,13 +552,14 @@ golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBc 
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b h1:9zKuko04nR4gjZ4+DNjHqRlAJqbJETHwiNKDqTfOjfE= golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/receiver/k8seventsreceiver/k8s_event_to_logdata.go b/receiver/k8seventsreceiver/k8s_event_to_logdata.go index a8f64452ff32..7ff565e40e3e 100644 --- a/receiver/k8seventsreceiver/k8s_event_to_logdata.go +++ b/receiver/k8seventsreceiver/k8s_event_to_logdata.go @@ -17,30 +17,31 @@ package k8seventsreceiver // import "github.com/open-telemetry/opentelemetry-col import ( "strings" - "go.opentelemetry.io/collector/model/pdata" semconv "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" "go.uber.org/zap" corev1 "k8s.io/api/core/v1" ) const ( - // Number of log attributes to add to the pdata.LogRecordSlice. + // Number of log attributes to add to the plog.LogRecordSlice. totalLogAttributes = 7 - // Number of resource attributes to add to the pdata.ResourceLogs. + // Number of resource attributes to add to the plog.ResourceLogs. totalResourceAttributes = 6 ) // Only two types of events are created as of now. // For more info: https://docs.openshift.com/container-platform/4.9/rest_api/metadata_apis/event-core-v1.html -var severityMap = map[string]pdata.SeverityNumber{ - "normal": pdata.SeverityNumberINFO, - "warning": pdata.SeverityNumberWARN, +var severityMap = map[string]plog.SeverityNumber{ + "normal": plog.SeverityNumberINFO, + "warning": plog.SeverityNumberWARN, } -// k8sEventToLogRecord converts Kubernetes event to pdata.LogRecordSlice and adds the resource attributes. -func k8sEventToLogData(logger *zap.Logger, ev *corev1.Event) pdata.Logs { - ld := pdata.NewLogs() +// k8sEventToLogRecord converts Kubernetes event to plog.LogRecordSlice and adds the resource attributes. 
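// Aside: a minimal, self-contained sketch of the split pdata packages this hunk
// migrates to (plog for logs, pcommon for shared types), shown with the same
// builder pattern k8sEventToLogData uses. The package name, function name, and
// literal values below are illustrative assumptions, not part of the receiver.
package example

import (
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/plog"
)

// buildExampleEventLog walks Logs -> ResourceLogs -> ScopeLogs -> LogRecords;
// severity and timestamp types now come from plog and pcommon instead of the
// old single go.opentelemetry.io/collector/model/pdata package.
func buildExampleEventLog() plog.Logs {
	ld := plog.NewLogs()
	rl := ld.ResourceLogs().AppendEmpty()
	rl.Resource().Attributes().InsertString("k8s.object.kind", "Pod") // resource attribute, as in the receiver
	lr := rl.ScopeLogs().AppendEmpty().LogRecords().AppendEmpty()
	lr.SetSeverityNumber(plog.SeverityNumberINFO)                  // was pdata.SeverityNumberINFO
	lr.SetTimestamp(pcommon.NewTimestampFromTime(time.Now()))      // was pdata.NewTimestampFromTime
	lr.Body().SetStringVal("example Kubernetes event message")
	return ld
}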
+func k8sEventToLogData(logger *zap.Logger, ev *corev1.Event) plog.Logs { + ld := plog.NewLogs() rl := ld.ResourceLogs().AppendEmpty() sl := rl.ScopeLogs().AppendEmpty() lr := sl.LogRecords().AppendEmpty() @@ -59,7 +60,7 @@ func k8sEventToLogData(logger *zap.Logger, ev *corev1.Event) pdata.Logs { resourceAttrs.InsertString("k8s.object.api_version", ev.InvolvedObject.APIVersion) resourceAttrs.InsertString("k8s.object.resource_version", ev.InvolvedObject.ResourceVersion) - lr.SetTimestamp(pdata.NewTimestampFromTime(getEventTimestamp(ev))) + lr.SetTimestamp(pcommon.NewTimestampFromTime(getEventTimestamp(ev))) // The Message field contains description about the event, // which is best suited for the "Body" of the LogRecordSlice. diff --git a/receiver/k8seventsreceiver/k8s_event_to_logdata_test.go b/receiver/k8seventsreceiver/k8s_event_to_logdata_test.go index 0ddeaae899fd..3d40b9dd6624 100644 --- a/receiver/k8seventsreceiver/k8s_event_to_logdata_test.go +++ b/receiver/k8seventsreceiver/k8s_event_to_logdata_test.go @@ -18,7 +18,7 @@ import ( "testing" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/plog" "go.uber.org/zap" ) @@ -70,6 +70,6 @@ func TestUnknownSeverity(t *testing.T) { rl := ld.ResourceLogs().At(0) logEntry := rl.ScopeLogs().At(0).LogRecords().At(0) - assert.Equal(t, logEntry.SeverityNumber(), pdata.SeverityNumberUNDEFINED) + assert.Equal(t, logEntry.SeverityNumber(), plog.SeverityNumberUNDEFINED) assert.Equal(t, logEntry.SeverityText(), "") } diff --git a/receiver/kafkametricsreceiver/broker_scraper.go b/receiver/kafkametricsreceiver/broker_scraper.go index 33992ca1fa26..ef92b2d4e5eb 100644 --- a/receiver/kafkametricsreceiver/broker_scraper.go +++ b/receiver/kafkametricsreceiver/broker_scraper.go @@ -21,7 +21,8 @@ import ( "github.com/Shopify/sarama" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/receiver/scraperhelper" "go.uber.org/zap" @@ -55,13 +56,13 @@ func (s *brokerScraper) shutdown(context.Context) error { return nil } -func (s *brokerScraper) scrape(context.Context) (pdata.Metrics, error) { +func (s *brokerScraper) scrape(context.Context) (pmetric.Metrics, error) { brokers := s.client.Brokers() - md := pdata.NewMetrics() + md := pmetric.NewMetrics() ilm := md.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty() ilm.Scope().SetName(instrumentationLibName) - addIntGauge(ilm.Metrics(), metadata.M.KafkaBrokers.Name(), pdata.NewTimestampFromTime(time.Now()), pdata.NewMap(), int64(len(brokers))) + addIntGauge(ilm.Metrics(), metadata.M.KafkaBrokers.Name(), pcommon.NewTimestampFromTime(time.Now()), pcommon.NewMap(), int64(len(brokers))) return md, nil } diff --git a/receiver/kafkametricsreceiver/consumer_scraper.go b/receiver/kafkametricsreceiver/consumer_scraper.go index 1fb032f807e7..903002807d82 100644 --- a/receiver/kafkametricsreceiver/consumer_scraper.go +++ b/receiver/kafkametricsreceiver/consumer_scraper.go @@ -22,7 +22,8 @@ import ( "github.com/Shopify/sarama" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/receiver/scraperhelper" "go.uber.org/multierr" "go.uber.org/zap" @@ -68,10 +69,10 @@ func (s *consumerScraper) shutdown(_ 
context.Context) error { return nil } -func (s *consumerScraper) scrape(context.Context) (pdata.Metrics, error) { +func (s *consumerScraper) scrape(context.Context) (pmetric.Metrics, error) { cgs, listErr := s.clusterAdmin.ListConsumerGroups() if listErr != nil { - return pdata.Metrics{}, listErr + return pmetric.Metrics{}, listErr } var matchedGrpIds []string @@ -83,7 +84,7 @@ func (s *consumerScraper) scrape(context.Context) (pdata.Metrics, error) { allTopics, listErr := s.clusterAdmin.ListTopics() if listErr != nil { - return pdata.Metrics{}, listErr + return pmetric.Metrics{}, listErr } matchedTopics := map[string]sarama.TopicDetail{} @@ -117,15 +118,15 @@ func (s *consumerScraper) scrape(context.Context) (pdata.Metrics, error) { } consumerGroups, listErr := s.clusterAdmin.DescribeConsumerGroups(matchedGrpIds) if listErr != nil { - return pdata.Metrics{}, listErr + return pmetric.Metrics{}, listErr } - now := pdata.NewTimestampFromTime(time.Now()) - md := pdata.NewMetrics() + now := pcommon.NewTimestampFromTime(time.Now()) + md := pmetric.NewMetrics() ilm := md.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty() ilm.Scope().SetName(instrumentationLibName) for _, group := range consumerGroups { - labels := pdata.NewMap() + labels := pcommon.NewMap() labels.UpsertString(metadata.A.Group, group.GroupId) addIntGauge(ilm.Metrics(), metadata.M.KafkaConsumerGroupMembers.Name(), now, labels, int64(len(group.Members))) groupOffsetFetchResponse, err := s.clusterAdmin.ListConsumerGroupOffsets(group.GroupId, topicPartitions) diff --git a/receiver/kafkametricsreceiver/go.mod b/receiver/kafkametricsreceiver/go.mod index 15aefcaaf700..36367f09f47c 100644 --- a/receiver/kafkametricsreceiver/go.mod +++ b/receiver/kafkametricsreceiver/go.mod @@ -7,13 +7,12 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/exporter/kafkaexporter v0.48.0 github.com/open-telemetry/opentelemetry-collector-contrib/internal/containertest v0.48.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 + go.uber.org/multierr v1.8.0 go.uber.org/zap v1.21.0 ) -require go.uber.org/multierr v1.8.0 - require ( github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect github.com/Microsoft/go-winio v0.5.1 // indirect @@ -41,7 +40,7 @@ require ( github.com/jcmturner/rpc/v2 v2.0.3 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/klauspost/compress v1.15.1 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect @@ -55,7 +54,6 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect github.com/sirupsen/logrus v1.8.1 // indirect - github.com/spf13/cast v1.4.1 // indirect github.com/stretchr/objx v0.2.0 // indirect github.com/uber/jaeger-client-go v2.30.0+incompatible // indirect github.com/uber/jaeger-lib v2.4.1+incompatible // indirect @@ -63,13 +61,14 @@ require ( github.com/xdg-go/scram v1.1.1 // indirect 
github.com/xdg-go/stringprep v1.0.3 // indirect go.opencensus.io v0.23.0 // indirect + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect golang.org/x/crypto v0.0.0-20220214200702-86341886e292 // indirect - golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect + golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 // indirect golang.org/x/text v0.3.7 // indirect google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect google.golang.org/grpc v1.45.0 // indirect @@ -88,3 +87,5 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator // see https://github.com/distribution/distribution/issues/3590 exclude github.com/docker/distribution v2.8.0+incompatible + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/receiver/kafkametricsreceiver/go.sum b/receiver/kafkametricsreceiver/go.sum index 50cc2a3daeca..e885a2428452 100644 --- a/receiver/kafkametricsreceiver/go.sum +++ b/receiver/kafkametricsreceiver/go.sum @@ -498,8 +498,8 @@ github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdY github.com/klauspost/compress v1.14.4/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -686,8 +686,6 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasO github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= @@ -768,17 +766,19 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= 
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= @@ -880,8 +880,9 @@ golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -967,8 +968,8 @@ golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/receiver/kafkametricsreceiver/internal/metadata/generated_metrics.go b/receiver/kafkametricsreceiver/internal/metadata/generated_metrics.go index 1e56b9842d3e..0bba79e7560b 100644 --- a/receiver/kafkametricsreceiver/internal/metadata/generated_metrics.go +++ b/receiver/kafkametricsreceiver/internal/metadata/generated_metrics.go @@ -4,7 +4,7 @@ package metadata import ( "go.opentelemetry.io/collector/config" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" ) // Type is the component type name. @@ -13,14 +13,14 @@ const Type config.Type = "kafkametricsreceiver" // MetricIntf is an interface to generically interact with generated metric. type MetricIntf interface { Name() string - New() pdata.Metric - Init(metric pdata.Metric) + New() pmetric.Metric + Init(metric pmetric.Metric) } // Intentionally not exposing this so that it is opaque and can change freely. type metricImpl struct { name string - initFunc func(pdata.Metric) + initFunc func(pmetric.Metric) } // Name returns the metric name. @@ -29,14 +29,14 @@ func (m *metricImpl) Name() string { } // New creates a metric object preinitialized. -func (m *metricImpl) New() pdata.Metric { - metric := pdata.NewMetric() +func (m *metricImpl) New() pmetric.Metric { + metric := pmetric.NewMetric() m.Init(metric) return metric } // Init initializes the provided metric object. 
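// Aside: a minimal sketch of creating a gauge metric with the new pmetric and
// pcommon packages, mirroring what the generated initFuncs below and
// addIntGauge in topic_scraper.go do. The package, function, scope, and metric
// names are illustrative assumptions.
package example

import (
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"
)

func buildExampleGauge(value int64) pmetric.Metrics {
	md := pmetric.NewMetrics()
	ilm := md.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty()
	ilm.Scope().SetName("example.scope")
	m := ilm.Metrics().AppendEmpty()
	m.SetName("example.gauge")
	m.SetDataType(pmetric.MetricDataTypeGauge) // was pdata.MetricDataTypeGauge
	dp := m.Gauge().DataPoints().AppendEmpty()
	dp.SetTimestamp(pcommon.NewTimestampFromTime(time.Now())) // was pdata.NewTimestampFromTime
	dp.SetIntVal(value)
	return md
}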
-func (m *metricImpl) Init(metric pdata.Metric) { +func (m *metricImpl) Init(metric pmetric.Metric) { m.initFunc(metric) } @@ -94,101 +94,101 @@ func (m *metricStruct) ByName(n string) MetricIntf { var Metrics = &metricStruct{ &metricImpl{ "kafka.brokers", - func(metric pdata.Metric) { + func(metric pmetric.Metric) { metric.SetName("kafka.brokers") metric.SetDescription("Number of brokers in the cluster.") metric.SetUnit("{brokers}") - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) }, }, &metricImpl{ "kafka.consumer_group.lag", - func(metric pdata.Metric) { + func(metric pmetric.Metric) { metric.SetName("kafka.consumer_group.lag") metric.SetDescription("Current approximate lag of consumer group at partition of topic") metric.SetUnit("1") - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) }, }, &metricImpl{ "kafka.consumer_group.lag_sum", - func(metric pdata.Metric) { + func(metric pmetric.Metric) { metric.SetName("kafka.consumer_group.lag_sum") metric.SetDescription("Current approximate sum of consumer group lag across all partitions of topic") metric.SetUnit("1") - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) }, }, &metricImpl{ "kafka.consumer_group.members", - func(metric pdata.Metric) { + func(metric pmetric.Metric) { metric.SetName("kafka.consumer_group.members") metric.SetDescription("Count of members in the consumer group") metric.SetUnit("{members}") - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) }, }, &metricImpl{ "kafka.consumer_group.offset", - func(metric pdata.Metric) { + func(metric pmetric.Metric) { metric.SetName("kafka.consumer_group.offset") metric.SetDescription("Current offset of the consumer group at partition of topic") metric.SetUnit("1") - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) }, }, &metricImpl{ "kafka.consumer_group.offset_sum", - func(metric pdata.Metric) { + func(metric pmetric.Metric) { metric.SetName("kafka.consumer_group.offset_sum") metric.SetDescription("Sum of consumer group offset across partitions of topic") metric.SetUnit("1") - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) }, }, &metricImpl{ "kafka.partition.current_offset", - func(metric pdata.Metric) { + func(metric pmetric.Metric) { metric.SetName("kafka.partition.current_offset") metric.SetDescription("Current offset of partition of topic.") metric.SetUnit("1") - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) }, }, &metricImpl{ "kafka.partition.oldest_offset", - func(metric pdata.Metric) { + func(metric pmetric.Metric) { metric.SetName("kafka.partition.oldest_offset") metric.SetDescription("Oldest offset of partition of topic") metric.SetUnit("1") - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) }, }, &metricImpl{ "kafka.partition.replicas", - func(metric pdata.Metric) { + func(metric pmetric.Metric) { metric.SetName("kafka.partition.replicas") metric.SetDescription("Number of replicas for partition of topic") metric.SetUnit("{replicas}") - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) }, }, &metricImpl{ "kafka.partition.replicas_in_sync", - func(metric pdata.Metric) { + func(metric pmetric.Metric) { metric.SetName("kafka.partition.replicas_in_sync") 
metric.SetDescription("Number of synchronized replicas of partition") metric.SetUnit("{replicas}") - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) }, }, &metricImpl{ "kafka.topic.partitions", - func(metric pdata.Metric) { + func(metric pmetric.Metric) { metric.SetName("kafka.topic.partitions") metric.SetDescription("Number of partitions in topic.") metric.SetUnit("{partitions}") - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) }, }, } diff --git a/receiver/kafkametricsreceiver/topic_scraper.go b/receiver/kafkametricsreceiver/topic_scraper.go index 73ce8efb3363..fe787a0be259 100644 --- a/receiver/kafkametricsreceiver/topic_scraper.go +++ b/receiver/kafkametricsreceiver/topic_scraper.go @@ -22,7 +22,8 @@ import ( "github.com/Shopify/sarama" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/receiver/scrapererror" "go.opentelemetry.io/collector/receiver/scraperhelper" "go.uber.org/zap" @@ -58,17 +59,17 @@ func (s *topicScraper) shutdown(context.Context) error { return nil } -func (s *topicScraper) scrape(context.Context) (pdata.Metrics, error) { +func (s *topicScraper) scrape(context.Context) (pmetric.Metrics, error) { topics, err := s.client.Topics() if err != nil { s.logger.Error("Error fetching cluster topics ", zap.Error(err)) - return pdata.Metrics{}, err + return pmetric.Metrics{}, err } var scrapeErrors = scrapererror.ScrapeErrors{} - now := pdata.NewTimestampFromTime(time.Now()) - md := pdata.NewMetrics() + now := pcommon.NewTimestampFromTime(time.Now()) + md := pmetric.NewMetrics() ilm := md.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty() ilm.Scope().SetName(instrumentationLibName) for _, topic := range topics { @@ -80,7 +81,7 @@ func (s *topicScraper) scrape(context.Context) (pdata.Metrics, error) { scrapeErrors.Add(err) continue } - labels := pdata.NewMap() + labels := pcommon.NewMap() labels.UpsertString(metadata.A.Topic, topic) addIntGauge(ilm.Metrics(), metadata.M.KafkaTopicPartitions.Name(), now, labels, int64(len(partitions))) for _, partition := range partitions { @@ -133,10 +134,10 @@ func createTopicsScraper(_ context.Context, cfg Config, saramaConfig *sarama.Con ) } -func addIntGauge(ms pdata.MetricSlice, name string, now pdata.Timestamp, labels pdata.Map, value int64) { +func addIntGauge(ms pmetric.MetricSlice, name string, now pcommon.Timestamp, labels pcommon.Map, value int64) { m := ms.AppendEmpty() m.SetName(name) - m.SetDataType(pdata.MetricDataTypeGauge) + m.SetDataType(pmetric.MetricDataTypeGauge) dp := m.Gauge().DataPoints().AppendEmpty() dp.SetTimestamp(now) dp.SetIntVal(value) diff --git a/receiver/kafkareceiver/factory_test.go b/receiver/kafkareceiver/factory_test.go index 8a24a6230bcb..f25fb71c3ee3 100644 --- a/receiver/kafkareceiver/factory_test.go +++ b/receiver/kafkareceiver/factory_test.go @@ -22,7 +22,9 @@ import ( "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/config/configtest" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" ) func TestCreateDefaultConfig(t *testing.T) { @@ -178,7 +180,7 @@ type customLogsUnmarshaler struct { var _ TracesUnmarshaler = 
(*customTracesUnmarshaler)(nil) -func (c customTracesUnmarshaler) Unmarshal([]byte) (pdata.Traces, error) { +func (c customTracesUnmarshaler) Unmarshal([]byte) (ptrace.Traces, error) { panic("implement me") } @@ -186,7 +188,7 @@ func (c customTracesUnmarshaler) Encoding() string { return "custom" } -func (c customMetricsUnmarshaler) Unmarshal([]byte) (pdata.Metrics, error) { +func (c customMetricsUnmarshaler) Unmarshal([]byte) (pmetric.Metrics, error) { panic("implement me") } @@ -194,7 +196,7 @@ func (c customMetricsUnmarshaler) Encoding() string { return "custom" } -func (c customLogsUnmarshaler) Unmarshal([]byte) (pdata.Logs, error) { +func (c customLogsUnmarshaler) Unmarshal([]byte) (plog.Logs, error) { panic("implement me") } diff --git a/receiver/kafkareceiver/go.mod b/receiver/kafkareceiver/go.mod index 18a4fb30db4d..5ea5a24d9868 100644 --- a/receiver/kafkareceiver/go.mod +++ b/receiver/kafkareceiver/go.mod @@ -14,14 +14,15 @@ require ( github.com/openzipkin/zipkin-go v0.4.0 github.com/stretchr/testify v1.7.1 go.opencensus.io v0.23.0 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/zap v1.21.0 ) require ( github.com/aws/aws-sdk-go v1.43.37 // indirect - github.com/cenkalti/backoff/v4 v4.1.2 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/census-instrumentation/opencensus-proto v0.3.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/eapache/go-resiliency v1.2.0 // indirect @@ -38,7 +39,7 @@ require ( github.com/jcmturner/rpc/v2 v2.0.3 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/klauspost/compress v1.15.1 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect @@ -47,7 +48,6 @@ require ( github.com/pierrec/lz4 v2.6.1+incompatible // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect - github.com/spf13/cast v1.4.1 // indirect github.com/uber/jaeger-client-go v2.30.0+incompatible // indirect github.com/uber/jaeger-lib v2.4.1+incompatible // indirect github.com/xdg-go/pbkdf2 v1.0.0 // indirect @@ -59,11 +59,8 @@ require ( go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect golang.org/x/crypto v0.0.0-20220214200702-86341886e292 // indirect - golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect golang.org/x/text v0.3.7 // indirect - google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect - google.golang.org/grpc v1.45.0 // indirect google.golang.org/protobuf v1.28.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect @@ -78,3 +75,5 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator replace 
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin => ../../pkg/translator/zipkin replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus => ../../pkg/translator/opencensus + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/receiver/kafkareceiver/go.sum b/receiver/kafkareceiver/go.sum index c0814e7c0f10..d1a3aa04786b 100644 --- a/receiver/kafkareceiver/go.sum +++ b/receiver/kafkareceiver/go.sum @@ -2,7 +2,6 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/HdrHistogram/hdrhistogram-go v1.0.1 h1:GX8GAYDuhlFQnI2fRDHQhTlkHMz8bEn0jTI6LJU0mpw= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/Shopify/sarama v1.30.0/go.mod h1:zujlQQx1kzHsh4jfV1USnptCQrHAEZ2Hk8fTKCulPVs= github.com/Shopify/sarama v1.32.0 h1:P+RUjEaRU0GMMbYexGMDyrMkLhbbBVUVISDywi+IlFU= github.com/Shopify/sarama v1.32.0/go.mod h1:+EmJJKZWVT/faR9RcOxJerP+LId4iWdQPBGLy1Y1Njs= @@ -29,21 +28,16 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= -github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.3.0 h1:t/LhUZLVitR1Ow2YOnduCsavhwFUklBMoGVYUCqmCqk= github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go 
v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -59,7 +53,6 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= @@ -120,7 +113,6 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= @@ -177,8 +169,8 @@ github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47e github.com/klauspost/compress v1.14.4/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= @@ -246,9 +238,6 @@ github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFo github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.8.1/go.mod 
h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -280,17 +269,19 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= @@ -338,8 +329,9 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod 
h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -371,8 +363,7 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -406,7 +397,6 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= @@ -416,10 +406,8 @@ google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8 google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod 
h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/receiver/kafkareceiver/jaeger_unmarshaler.go b/receiver/kafkareceiver/jaeger_unmarshaler.go index b983cf8bf149..b7cd32c352a9 100644 --- a/receiver/kafkareceiver/jaeger_unmarshaler.go +++ b/receiver/kafkareceiver/jaeger_unmarshaler.go @@ -19,7 +19,7 @@ import ( "github.com/gogo/protobuf/jsonpb" jaegerproto "github.com/jaegertracing/jaeger/model" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/ptrace" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger" ) @@ -29,11 +29,11 @@ type jaegerProtoSpanUnmarshaler struct { var _ TracesUnmarshaler = (*jaegerProtoSpanUnmarshaler)(nil) -func (j jaegerProtoSpanUnmarshaler) Unmarshal(bytes []byte) (pdata.Traces, error) { +func (j jaegerProtoSpanUnmarshaler) Unmarshal(bytes []byte) (ptrace.Traces, error) { span := &jaegerproto.Span{} err := span.Unmarshal(bytes) if err != nil { - return pdata.NewTraces(), err + return ptrace.NewTraces(), err } return jaegerSpanToTraces(span) } @@ -47,11 +47,11 @@ type jaegerJSONSpanUnmarshaler struct { var _ TracesUnmarshaler = (*jaegerJSONSpanUnmarshaler)(nil) -func (j jaegerJSONSpanUnmarshaler) Unmarshal(data []byte) (pdata.Traces, error) { +func (j jaegerJSONSpanUnmarshaler) Unmarshal(data []byte) (ptrace.Traces, error) { span := &jaegerproto.Span{} err := jsonpb.Unmarshal(bytes.NewReader(data), span) if err != nil { - return pdata.NewTraces(), err + return ptrace.NewTraces(), err } return jaegerSpanToTraces(span) } @@ -60,7 +60,7 @@ func (j jaegerJSONSpanUnmarshaler) Encoding() string { return "jaeger_json" } -func jaegerSpanToTraces(span *jaegerproto.Span) (pdata.Traces, error) { +func jaegerSpanToTraces(span *jaegerproto.Span) (ptrace.Traces, error) { batch := jaegerproto.Batch{ Spans: []*jaegerproto.Span{span}, Process: span.Process, diff --git a/receiver/kafkareceiver/jaeger_unmarshaler_test.go b/receiver/kafkareceiver/jaeger_unmarshaler_test.go index 4947eb174e97..15224ec6a38f 100644 --- a/receiver/kafkareceiver/jaeger_unmarshaler_test.go +++ b/receiver/kafkareceiver/jaeger_unmarshaler_test.go @@ -21,19 +21,20 @@ import ( "github.com/gogo/protobuf/jsonpb" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger" ) func TestUnmarshalJaeger(t *testing.T) { - td := pdata.NewTraces() + td := ptrace.NewTraces() span := td.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty().Spans().AppendEmpty() span.SetName("foo") - span.SetStartTimestamp(pdata.Timestamp(10)) - span.SetEndTimestamp(pdata.Timestamp(20)) - span.SetTraceID(pdata.NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16})) - span.SetSpanID(pdata.NewSpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8})) + span.SetStartTimestamp(pcommon.Timestamp(10)) + span.SetEndTimestamp(pcommon.Timestamp(20)) + span.SetTraceID(pcommon.NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16})) + span.SetSpanID(pcommon.NewSpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8})) batches, err := jaeger.ProtoFromTraces(td) require.NoError(t, err) @@ -73,13 +74,13 @@ func TestUnmarshalJaeger(t *testing.T) { func 
TestUnmarshalJaegerProto_error(t *testing.T) { p := jaegerProtoSpanUnmarshaler{} got, err := p.Unmarshal([]byte("+$%")) - assert.Equal(t, pdata.NewTraces(), got) + assert.Equal(t, ptrace.NewTraces(), got) assert.Error(t, err) } func TestUnmarshalJaegerJSON_error(t *testing.T) { p := jaegerJSONSpanUnmarshaler{} got, err := p.Unmarshal([]byte("+$%")) - assert.Equal(t, pdata.NewTraces(), got) + assert.Equal(t, ptrace.NewTraces(), got) assert.Error(t, err) } diff --git a/receiver/kafkareceiver/kafka_receiver_test.go b/receiver/kafkareceiver/kafka_receiver_test.go index 8ad077cc0d8d..cdb70564e7ec 100644 --- a/receiver/kafkareceiver/kafka_receiver_test.go +++ b/receiver/kafkareceiver/kafka_receiver_test.go @@ -28,9 +28,10 @@ import ( "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/config/configtls" "go.opentelemetry.io/collector/consumer/consumertest" - "go.opentelemetry.io/collector/model/otlp" - "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/obsreport" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap" "go.uber.org/zap/zapcore" "go.uber.org/zap/zaptest/observer" @@ -133,7 +134,7 @@ func TestTracesConsumerGroupHandler(t *testing.T) { defer view.Unregister(views...) c := tracesConsumerGroupHandler{ - unmarshaler: newPdataTracesUnmarshaler(otlp.NewProtobufTracesUnmarshaler(), defaultEncoding), + unmarshaler: newPdataTracesUnmarshaler(ptrace.NewProtoUnmarshaler(), defaultEncoding), logger: zap.NewNop(), ready: make(chan bool), nextConsumer: consumertest.NewNop(), @@ -175,7 +176,7 @@ func TestTracesConsumerGroupHandler(t *testing.T) { func TestTracesConsumerGroupHandler_error_unmarshal(t *testing.T) { c := tracesConsumerGroupHandler{ - unmarshaler: newPdataTracesUnmarshaler(otlp.NewProtobufTracesUnmarshaler(), defaultEncoding), + unmarshaler: newPdataTracesUnmarshaler(ptrace.NewProtoUnmarshaler(), defaultEncoding), logger: zap.NewNop(), ready: make(chan bool), nextConsumer: consumertest.NewNop(), @@ -200,7 +201,7 @@ func TestTracesConsumerGroupHandler_error_unmarshal(t *testing.T) { func TestTracesConsumerGroupHandler_error_nextConsumer(t *testing.T) { consumerError := errors.New("failed to consume") c := tracesConsumerGroupHandler{ - unmarshaler: newPdataTracesUnmarshaler(otlp.NewProtobufTracesUnmarshaler(), defaultEncoding), + unmarshaler: newPdataTracesUnmarshaler(ptrace.NewProtoUnmarshaler(), defaultEncoding), logger: zap.NewNop(), ready: make(chan bool), nextConsumer: consumertest.NewErr(consumerError), @@ -218,9 +219,9 @@ func TestTracesConsumerGroupHandler_error_nextConsumer(t *testing.T) { wg.Done() }() - td := pdata.NewTraces() + td := ptrace.NewTraces() td.ResourceSpans().AppendEmpty() - bts, err := otlp.NewProtobufTracesMarshaler().MarshalTraces(td) + bts, err := ptrace.NewProtoMarshaler().MarshalTraces(td) require.NoError(t, err) groupClaim.messageChan <- &sarama.ConsumerMessage{Value: bts} close(groupClaim.messageChan) @@ -321,7 +322,7 @@ func TestMetricsConsumerGroupHandler(t *testing.T) { defer view.Unregister(views...) 
c := metricsConsumerGroupHandler{ - unmarshaler: newPdataMetricsUnmarshaler(otlp.NewProtobufMetricsUnmarshaler(), defaultEncoding), + unmarshaler: newPdataMetricsUnmarshaler(pmetric.NewProtoUnmarshaler(), defaultEncoding), logger: zap.NewNop(), ready: make(chan bool), nextConsumer: consumertest.NewNop(), @@ -363,7 +364,7 @@ func TestMetricsConsumerGroupHandler(t *testing.T) { func TestMetricsConsumerGroupHandler_error_unmarshal(t *testing.T) { c := metricsConsumerGroupHandler{ - unmarshaler: newPdataMetricsUnmarshaler(otlp.NewProtobufMetricsUnmarshaler(), defaultEncoding), + unmarshaler: newPdataMetricsUnmarshaler(pmetric.NewProtoUnmarshaler(), defaultEncoding), logger: zap.NewNop(), ready: make(chan bool), nextConsumer: consumertest.NewNop(), @@ -388,7 +389,7 @@ func TestMetricsConsumerGroupHandler_error_unmarshal(t *testing.T) { func TestMetricsConsumerGroupHandler_error_nextConsumer(t *testing.T) { consumerError := errors.New("failed to consume") c := metricsConsumerGroupHandler{ - unmarshaler: newPdataMetricsUnmarshaler(otlp.NewProtobufMetricsUnmarshaler(), defaultEncoding), + unmarshaler: newPdataMetricsUnmarshaler(pmetric.NewProtoUnmarshaler(), defaultEncoding), logger: zap.NewNop(), ready: make(chan bool), nextConsumer: consumertest.NewErr(consumerError), @@ -407,7 +408,7 @@ func TestMetricsConsumerGroupHandler_error_nextConsumer(t *testing.T) { }() ld := testdata.GenerateMetricsOneMetric() - bts, err := otlp.NewProtobufMetricsMarshaler().MarshalMetrics(ld) + bts, err := pmetric.NewProtoMarshaler().MarshalMetrics(ld) require.NoError(t, err) groupClaim.messageChan <- &sarama.ConsumerMessage{Value: bts} close(groupClaim.messageChan) @@ -508,7 +509,7 @@ func TestLogsConsumerGroupHandler(t *testing.T) { defer view.Unregister(views...) c := logsConsumerGroupHandler{ - unmarshaler: newPdataLogsUnmarshaler(otlp.NewProtobufLogsUnmarshaler(), defaultEncoding), + unmarshaler: newPdataLogsUnmarshaler(plog.NewProtoUnmarshaler(), defaultEncoding), logger: zap.NewNop(), ready: make(chan bool), nextConsumer: consumertest.NewNop(), @@ -550,7 +551,7 @@ func TestLogsConsumerGroupHandler(t *testing.T) { func TestLogsConsumerGroupHandler_error_unmarshal(t *testing.T) { c := logsConsumerGroupHandler{ - unmarshaler: newPdataLogsUnmarshaler(otlp.NewProtobufLogsUnmarshaler(), defaultEncoding), + unmarshaler: newPdataLogsUnmarshaler(plog.NewProtoUnmarshaler(), defaultEncoding), logger: zap.NewNop(), ready: make(chan bool), nextConsumer: consumertest.NewNop(), @@ -575,7 +576,7 @@ func TestLogsConsumerGroupHandler_error_unmarshal(t *testing.T) { func TestLogsConsumerGroupHandler_error_nextConsumer(t *testing.T) { consumerError := errors.New("failed to consume") c := logsConsumerGroupHandler{ - unmarshaler: newPdataLogsUnmarshaler(otlp.NewProtobufLogsUnmarshaler(), defaultEncoding), + unmarshaler: newPdataLogsUnmarshaler(plog.NewProtoUnmarshaler(), defaultEncoding), logger: zap.NewNop(), ready: make(chan bool), nextConsumer: consumertest.NewErr(consumerError), @@ -594,7 +595,7 @@ func TestLogsConsumerGroupHandler_error_nextConsumer(t *testing.T) { }() ld := testdata.GenerateLogsOneLogRecord() - bts, err := otlp.NewProtobufLogsMarshaler().MarshalLogs(ld) + bts, err := plog.NewProtoMarshaler().MarshalLogs(ld) require.NoError(t, err) groupClaim.messageChan <- &sarama.ConsumerMessage{Value: bts} close(groupClaim.messageChan) diff --git a/receiver/kafkareceiver/pdata_unmarshaler.go b/receiver/kafkareceiver/pdata_unmarshaler.go index e81a563ca3a6..595a3dbcdfc5 100644 --- a/receiver/kafkareceiver/pdata_unmarshaler.go 
+++ b/receiver/kafkareceiver/pdata_unmarshaler.go @@ -15,65 +15,67 @@ package kafkareceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver" import ( - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" ) type pdataLogsUnmarshaler struct { - pdata.LogsUnmarshaler + plog.Unmarshaler encoding string } -func (p pdataLogsUnmarshaler) Unmarshal(buf []byte) (pdata.Logs, error) { - return p.LogsUnmarshaler.UnmarshalLogs(buf) +func (p pdataLogsUnmarshaler) Unmarshal(buf []byte) (plog.Logs, error) { + return p.Unmarshaler.UnmarshalLogs(buf) } func (p pdataLogsUnmarshaler) Encoding() string { return p.encoding } -func newPdataLogsUnmarshaler(unmarshaler pdata.LogsUnmarshaler, encoding string) LogsUnmarshaler { +func newPdataLogsUnmarshaler(unmarshaler plog.Unmarshaler, encoding string) LogsUnmarshaler { return pdataLogsUnmarshaler{ - LogsUnmarshaler: unmarshaler, - encoding: encoding, + Unmarshaler: unmarshaler, + encoding: encoding, } } type pdataTracesUnmarshaler struct { - pdata.TracesUnmarshaler + ptrace.Unmarshaler encoding string } -func (p pdataTracesUnmarshaler) Unmarshal(buf []byte) (pdata.Traces, error) { - return p.TracesUnmarshaler.UnmarshalTraces(buf) +func (p pdataTracesUnmarshaler) Unmarshal(buf []byte) (ptrace.Traces, error) { + return p.Unmarshaler.UnmarshalTraces(buf) } func (p pdataTracesUnmarshaler) Encoding() string { return p.encoding } -func newPdataTracesUnmarshaler(unmarshaler pdata.TracesUnmarshaler, encoding string) TracesUnmarshaler { +func newPdataTracesUnmarshaler(unmarshaler ptrace.Unmarshaler, encoding string) TracesUnmarshaler { return pdataTracesUnmarshaler{ - TracesUnmarshaler: unmarshaler, - encoding: encoding, + Unmarshaler: unmarshaler, + encoding: encoding, } } type pdataMetricsUnmarshaler struct { - pdata.MetricsUnmarshaler + pmetric.Unmarshaler encoding string } -func (p pdataMetricsUnmarshaler) Unmarshal(buf []byte) (pdata.Metrics, error) { - return p.MetricsUnmarshaler.UnmarshalMetrics(buf) +func (p pdataMetricsUnmarshaler) Unmarshal(buf []byte) (pmetric.Metrics, error) { + return p.Unmarshaler.UnmarshalMetrics(buf) } func (p pdataMetricsUnmarshaler) Encoding() string { return p.encoding } -func newPdataMetricsUnmarshaler(unmarshaler pdata.MetricsUnmarshaler, encoding string) MetricsUnmarshaler { +func newPdataMetricsUnmarshaler(unmarshaler pmetric.Unmarshaler, encoding string) MetricsUnmarshaler { return pdataMetricsUnmarshaler{ - MetricsUnmarshaler: unmarshaler, - encoding: encoding, + Unmarshaler: unmarshaler, + encoding: encoding, } } diff --git a/receiver/kafkareceiver/pdata_unmarshaler_test.go b/receiver/kafkareceiver/pdata_unmarshaler_test.go index 243e18ab8ad4..2d9cc01eaace 100644 --- a/receiver/kafkareceiver/pdata_unmarshaler_test.go +++ b/receiver/kafkareceiver/pdata_unmarshaler_test.go @@ -18,20 +18,22 @@ import ( "testing" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/otlp" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" ) func TestNewPdataTracesUnmarshaler(t *testing.T) { - um := newPdataTracesUnmarshaler(otlp.NewProtobufTracesUnmarshaler(), "test") + um := newPdataTracesUnmarshaler(ptrace.NewProtoUnmarshaler(), "test") assert.Equal(t, "test", um.Encoding()) } func TestNewPdataMetricsUnmarshaler(t *testing.T) { - um := 
newPdataMetricsUnmarshaler(otlp.NewProtobufMetricsUnmarshaler(), "test") + um := newPdataMetricsUnmarshaler(pmetric.NewProtoUnmarshaler(), "test") assert.Equal(t, "test", um.Encoding()) } func TestNewPdataLogsUnmarshaler(t *testing.T) { - um := newPdataLogsUnmarshaler(otlp.NewProtobufLogsUnmarshaler(), "test") + um := newPdataLogsUnmarshaler(plog.NewProtoUnmarshaler(), "test") assert.Equal(t, "test", um.Encoding()) } diff --git a/receiver/kafkareceiver/unmarshaler.go b/receiver/kafkareceiver/unmarshaler.go index 55045d7a2bb0..d3a759415602 100644 --- a/receiver/kafkareceiver/unmarshaler.go +++ b/receiver/kafkareceiver/unmarshaler.go @@ -15,8 +15,9 @@ package kafkareceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver" import ( - "go.opentelemetry.io/collector/model/otlp" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin/zipkinv1" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin/zipkinv2" @@ -25,7 +26,7 @@ import ( // TracesUnmarshaler deserializes the message body. type TracesUnmarshaler interface { // Unmarshal deserializes the message body into traces. - Unmarshal([]byte) (pdata.Traces, error) + Unmarshal([]byte) (ptrace.Traces, error) // Encoding of the serialized messages. Encoding() string @@ -34,7 +35,7 @@ type TracesUnmarshaler interface { // MetricsUnmarshaler deserializes the message body type MetricsUnmarshaler interface { // Unmarshal deserializes the message body into traces - Unmarshal([]byte) (pdata.Metrics, error) + Unmarshal([]byte) (pmetric.Metrics, error) // Encoding of the serialized messages Encoding() string @@ -43,7 +44,7 @@ type MetricsUnmarshaler interface { // LogsUnmarshaler deserializes the message body. type LogsUnmarshaler interface { // Unmarshal deserializes the message body into traces. - Unmarshal([]byte) (pdata.Logs, error) + Unmarshal([]byte) (plog.Logs, error) // Encoding of the serialized messages. Encoding() string @@ -51,7 +52,7 @@ type LogsUnmarshaler interface { // defaultTracesUnmarshalers returns map of supported encodings with TracesUnmarshaler. 
func defaultTracesUnmarshalers() map[string]TracesUnmarshaler { - otlpPb := newPdataTracesUnmarshaler(otlp.NewProtobufTracesUnmarshaler(), defaultEncoding) + otlpPb := newPdataTracesUnmarshaler(ptrace.NewProtoUnmarshaler(), defaultEncoding) jaegerProto := jaegerProtoSpanUnmarshaler{} jaegerJSON := jaegerJSONSpanUnmarshaler{} zipkinProto := newPdataTracesUnmarshaler(zipkinv2.NewProtobufTracesUnmarshaler(false, false), "zipkin_proto") @@ -68,14 +69,14 @@ func defaultTracesUnmarshalers() map[string]TracesUnmarshaler { } func defaultMetricsUnmarshalers() map[string]MetricsUnmarshaler { - otlpPb := newPdataMetricsUnmarshaler(otlp.NewProtobufMetricsUnmarshaler(), defaultEncoding) + otlpPb := newPdataMetricsUnmarshaler(pmetric.NewProtoUnmarshaler(), defaultEncoding) return map[string]MetricsUnmarshaler{ otlpPb.Encoding(): otlpPb, } } func defaultLogsUnmarshalers() map[string]LogsUnmarshaler { - otlpPb := newPdataLogsUnmarshaler(otlp.NewProtobufLogsUnmarshaler(), defaultEncoding) + otlpPb := newPdataLogsUnmarshaler(plog.NewProtoUnmarshaler(), defaultEncoding) return map[string]LogsUnmarshaler{ otlpPb.Encoding(): otlpPb, } diff --git a/receiver/kafkareceiver/zipkin_unmarshaler_test.go b/receiver/kafkareceiver/zipkin_unmarshaler_test.go index 06fee5325bac..975ebcafe256 100644 --- a/receiver/kafkareceiver/zipkin_unmarshaler_test.go +++ b/receiver/kafkareceiver/zipkin_unmarshaler_test.go @@ -24,8 +24,9 @@ import ( zipkinreporter "github.com/openzipkin/zipkin-go/reporter" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin/zipkinv2" ) @@ -33,16 +34,16 @@ import ( var v2FromTranslator zipkinv2.FromTranslator func TestUnmarshalZipkin(t *testing.T) { - td := pdata.NewTraces() + td := ptrace.NewTraces() rs := td.ResourceSpans().AppendEmpty() rs.Resource().Attributes().InsertString(conventions.AttributeServiceName, "my_service") span := rs.ScopeSpans().AppendEmpty().Spans().AppendEmpty() span.SetName("foo") - span.SetStartTimestamp(pdata.Timestamp(1597759000)) - span.SetEndTimestamp(pdata.Timestamp(1597769000)) - span.SetTraceID(pdata.NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16})) - span.SetSpanID(pdata.NewSpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8})) - span.SetParentSpanID(pdata.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 0})) + span.SetStartTimestamp(pcommon.Timestamp(1597759000)) + span.SetEndTimestamp(pcommon.Timestamp(1597769000)) + span.SetTraceID(pcommon.NewTraceID([16]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16})) + span.SetSpanID(pcommon.NewSpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8})) + span.SetParentSpanID(pcommon.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 0})) spans, err := v2FromTranslator.FromTraces(td) require.NoError(t, err) @@ -68,7 +69,7 @@ func TestUnmarshalZipkin(t *testing.T) { unmarshaler TracesUnmarshaler encoding string bytes []byte - expected pdata.Traces + expected ptrace.Traces }{ { unmarshaler: newZipkinProtobufUnmarshaler(), diff --git a/receiver/kubeletstatsreceiver/go.mod b/receiver/kubeletstatsreceiver/go.mod index 71c48bfdba53..1b50ed893e2b 100644 --- a/receiver/kubeletstatsreceiver/go.mod +++ b/receiver/kubeletstatsreceiver/go.mod @@ -6,8 +6,9 @@ require ( 
github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.48.0 github.com/open-telemetry/opentelemetry-collector-contrib/internal/kubelet v0.48.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/zap v1.21.0 k8s.io/api v0.23.5 k8s.io/apimachinery v0.23.5 @@ -26,7 +27,7 @@ require ( github.com/googleapis/gnostic v0.5.5 // indirect github.com/imdario/mergo v0.3.11 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect @@ -41,7 +42,6 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rogpeppe/go-internal v1.6.1 // indirect - github.com/spf13/cast v1.4.1 // indirect github.com/spf13/pflag v1.0.5 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect @@ -49,10 +49,10 @@ require ( go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect - golang.org/x/net v0.0.0-20211209124913-491a49abca63 // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect - golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b // indirect + golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 // indirect + golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect golang.org/x/text v0.3.7 // indirect golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect google.golang.org/appengine v1.6.7 // indirect @@ -73,3 +73,5 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/commo replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig => ../../internal/k8sconfig replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/kubelet => ../../internal/kubelet + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/receiver/kubeletstatsreceiver/go.sum b/receiver/kubeletstatsreceiver/go.sum index ba2c10cbb142..f8fc12a78e42 100644 --- a/receiver/kubeletstatsreceiver/go.sum +++ b/receiver/kubeletstatsreceiver/go.sum @@ -84,7 +84,7 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= 
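
The kafkareceiver hunks above only swap import paths and constructor names: the single go.opentelemetry.io/collector/model packages give way to the signal-specific pdata/plog, pdata/pmetric and pdata/ptrace packages, and otlp.NewProtobuf*Marshaler/Unmarshaler become the per-signal NewProtoMarshaler/NewProtoUnmarshaler. A minimal standalone sketch of the new round-trip API, not part of the patch (package name, scope name and printed value are chosen for illustration; plog and ptrace follow the same pattern):

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	// Build an empty metrics payload with pmetric, which replaces
	// pdata.NewMetrics from go.opentelemetry.io/collector/model/pdata.
	md := pmetric.NewMetrics()
	rm := md.ResourceMetrics().AppendEmpty()
	rm.ScopeMetrics().AppendEmpty().Scope().SetName("example")

	// The OTLP protobuf codecs now live next to the data types:
	// pmetric.NewProtoMarshaler replaces otlp.NewProtobufMetricsMarshaler,
	// pmetric.NewProtoUnmarshaler replaces otlp.NewProtobufMetricsUnmarshaler.
	bts, err := pmetric.NewProtoMarshaler().MarshalMetrics(md)
	if err != nil {
		panic(err)
	}
	back, err := pmetric.NewProtoUnmarshaler().UnmarshalMetrics(bts)
	if err != nil {
		panic(err)
	}
	fmt.Println(back.ResourceMetrics().Len()) // 1
}
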
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -244,7 +244,6 @@ github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2c github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw= github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= @@ -307,8 +306,8 @@ github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= @@ -439,8 +438,6 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasO github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= @@ -478,10 +475,12 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= 
-go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib v0.20.0 h1:ubFQUn0VCZ0gPwIoJfBJVpeBlyRMxu8Mm/huKWYd9p0= go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= @@ -496,7 +495,7 @@ go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujX go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE= go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE= go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= @@ -608,8 +607,9 @@ golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLd golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211209124913-491a49abca63 h1:iocB37TsdFuN6IBRZ+ry36wrkoV51/tl5vOWqkcPGvY= golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -701,13 +701,14 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b h1:9zKuko04nR4gjZ4+DNjHqRlAJqbJETHwiNKDqTfOjfE= golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/accumulator.go b/receiver/kubeletstatsreceiver/internal/kubelet/accumulator.go index c8444856da45..e5523dc462c9 100644 --- a/receiver/kubeletstatsreceiver/internal/kubelet/accumulator.go +++ b/receiver/kubeletstatsreceiver/internal/kubelet/accumulator.go @@ -17,7 +17,8 @@ package kubelet // import "github.com/open-telemetry/opentelemetry-collector-con import ( "time" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1" ) @@ -41,7 +42,7 @@ var ValidMetricGroups = map[MetricGroup]bool{ } type metricDataAccumulator struct { - m []pdata.Metrics + m []pmetric.Metrics metadata Metadata logger *zap.Logger metricGroupsToCollect map[MetricGroup]bool @@ -61,15 +62,15 @@ func (a *metricDataAccumulator) nodeStats(s stats.NodeStats) { return } - md := pdata.NewMetrics() + md := pmetric.NewMetrics() rm := md.ResourceMetrics().AppendEmpty() fillNodeResource(rm.Resource(), s) ilm := rm.ScopeMetrics().AppendEmpty() ilm.Scope().SetName(a.typeStr) - startTime := pdata.NewTimestampFromTime(s.StartTime.Time) - currentTime := pdata.NewTimestampFromTime(a.time) + startTime := pcommon.NewTimestampFromTime(s.StartTime.Time) + currentTime := pcommon.NewTimestampFromTime(a.time) addCPUMetrics(ilm.Metrics(), nodePrefix, s.CPU, startTime, currentTime) addMemoryMetrics(ilm.Metrics(), nodePrefix, s.Memory, currentTime) addFilesystemMetrics(ilm.Metrics(), nodePrefix, s.Fs, currentTime) @@ -84,15 +85,15 @@ func (a *metricDataAccumulator) podStats(s stats.PodStats) { return } - md := pdata.NewMetrics() + md := pmetric.NewMetrics() rm := md.ResourceMetrics().AppendEmpty() fillPodResource(rm.Resource(), s) ilm := 
rm.ScopeMetrics().AppendEmpty() ilm.Scope().SetName(a.typeStr) - startTime := pdata.NewTimestampFromTime(s.StartTime.Time) - currentTime := pdata.NewTimestampFromTime(a.time) + startTime := pcommon.NewTimestampFromTime(s.StartTime.Time) + currentTime := pcommon.NewTimestampFromTime(a.time) addCPUMetrics(ilm.Metrics(), podPrefix, s.CPU, startTime, currentTime) addMemoryMetrics(ilm.Metrics(), podPrefix, s.Memory, currentTime) addFilesystemMetrics(ilm.Metrics(), podPrefix, s.EphemeralStorage, currentTime) @@ -106,7 +107,7 @@ func (a *metricDataAccumulator) containerStats(sPod stats.PodStats, s stats.Cont return } - md := pdata.NewMetrics() + md := pmetric.NewMetrics() rm := md.ResourceMetrics().AppendEmpty() if err := fillContainerResource(rm.Resource(), sPod, s, a.metadata); err != nil { @@ -121,8 +122,8 @@ func (a *metricDataAccumulator) containerStats(sPod stats.PodStats, s stats.Cont ilm := rm.ScopeMetrics().AppendEmpty() ilm.Scope().SetName(a.typeStr) - startTime := pdata.NewTimestampFromTime(s.StartTime.Time) - currentTime := pdata.NewTimestampFromTime(a.time) + startTime := pcommon.NewTimestampFromTime(s.StartTime.Time) + currentTime := pcommon.NewTimestampFromTime(a.time) addCPUMetrics(ilm.Metrics(), containerPrefix, s.CPU, startTime, currentTime) addMemoryMetrics(ilm.Metrics(), containerPrefix, s.Memory, currentTime) addFilesystemMetrics(ilm.Metrics(), containerPrefix, s.Rootfs, currentTime) @@ -134,7 +135,7 @@ func (a *metricDataAccumulator) volumeStats(sPod stats.PodStats, s stats.VolumeS return } - md := pdata.NewMetrics() + md := pmetric.NewMetrics() rm := md.ResourceMetrics().AppendEmpty() if err := fillVolumeResource(rm.Resource(), sPod, s, a.metadata); err != nil { @@ -149,7 +150,7 @@ func (a *metricDataAccumulator) volumeStats(sPod stats.PodStats, s stats.VolumeS ilm := rm.ScopeMetrics().AppendEmpty() ilm.Scope().SetName(a.typeStr) - currentTime := pdata.NewTimestampFromTime(a.time) + currentTime := pcommon.NewTimestampFromTime(a.time) addVolumeMetrics(ilm.Metrics(), k8sPrefix, s, currentTime) a.m = append(a.m, md) } diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/cpu.go b/receiver/kubeletstatsreceiver/internal/kubelet/cpu.go index 0ecfb143fcd7..a6a5b6ac18e5 100644 --- a/receiver/kubeletstatsreceiver/internal/kubelet/cpu.go +++ b/receiver/kubeletstatsreceiver/internal/kubelet/cpu.go @@ -15,13 +15,14 @@ package kubelet // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/kubelet" import ( - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata" ) -func addCPUMetrics(dest pdata.MetricSlice, prefix string, s *stats.CPUStats, startTime pdata.Timestamp, currentTime pdata.Timestamp) { +func addCPUMetrics(dest pmetric.MetricSlice, prefix string, s *stats.CPUStats, startTime pcommon.Timestamp, currentTime pcommon.Timestamp) { if s == nil { return } @@ -29,7 +30,7 @@ func addCPUMetrics(dest pdata.MetricSlice, prefix string, s *stats.CPUStats, sta addCPUTimeMetric(dest, prefix, s, startTime, currentTime) } -func addCPUUsageMetric(dest pdata.MetricSlice, prefix string, s *stats.CPUStats, currentTime pdata.Timestamp) { +func addCPUUsageMetric(dest pmetric.MetricSlice, prefix string, s *stats.CPUStats, currentTime pcommon.Timestamp) { if s.UsageNanoCores == nil { return } 
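
The accumulator and cpu hunks above split the old pdata surface in two: resource and timestamp types now come from pdata/pcommon, while metric containers come from pdata/pmetric. A rough standalone sketch of the same construction pattern, not taken from the patch (the attribute key is the semconv constant written out literally; the scope name, metric name and value are invented for illustration, assuming the v0.48-era NumberDataPoint setters):

package main

import (
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"
)

// buildNodeMetrics mirrors the accumulator flow: one ResourceMetrics per
// stats object, resource attributes set via pcommon, and wall-clock times
// converted with pcommon.NewTimestampFromTime.
func buildNodeMetrics(nodeName string, now time.Time) pmetric.Metrics {
	md := pmetric.NewMetrics()
	rm := md.ResourceMetrics().AppendEmpty()
	rm.Resource().Attributes().UpsertString("k8s.node.name", nodeName)

	ilm := rm.ScopeMetrics().AppendEmpty()
	ilm.Scope().SetName("kubeletstatsreceiver")

	// A single gauge data point stamped with the collection time, in the
	// style of fillDoubleGauge for cpu.utilization.
	m := ilm.Metrics().AppendEmpty()
	m.SetName("node.cpu.utilization") // illustrative name
	m.SetDataType(pmetric.MetricDataTypeGauge)
	dp := m.Gauge().DataPoints().AppendEmpty()
	dp.SetDoubleVal(0.42) // placeholder value
	dp.SetTimestamp(pcommon.NewTimestampFromTime(now))
	return md
}

func main() {
	_ = buildNodeMetrics("node-1", time.Now())
}
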
@@ -37,7 +38,7 @@ func addCPUUsageMetric(dest pdata.MetricSlice, prefix string, s *stats.CPUStats, fillDoubleGauge(dest.AppendEmpty(), prefix, metadata.M.CPUUtilization, value, currentTime) } -func addCPUTimeMetric(dest pdata.MetricSlice, prefix string, s *stats.CPUStats, startTime pdata.Timestamp, currentTime pdata.Timestamp) { +func addCPUTimeMetric(dest pmetric.MetricSlice, prefix string, s *stats.CPUStats, startTime pcommon.Timestamp, currentTime pcommon.Timestamp) { if s.UsageCoreNanoSeconds == nil { return } diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/fs.go b/receiver/kubeletstatsreceiver/internal/kubelet/fs.go index c3aa7504a309..464592ee0fb4 100644 --- a/receiver/kubeletstatsreceiver/internal/kubelet/fs.go +++ b/receiver/kubeletstatsreceiver/internal/kubelet/fs.go @@ -15,13 +15,14 @@ package kubelet // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/kubelet" import ( - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata" ) -func addFilesystemMetrics(dest pdata.MetricSlice, prefix string, s *stats.FsStats, currentTime pdata.Timestamp) { +func addFilesystemMetrics(dest pmetric.MetricSlice, prefix string, s *stats.FsStats, currentTime pcommon.Timestamp) { if s == nil { return } diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/mem.go b/receiver/kubeletstatsreceiver/internal/kubelet/mem.go index b495bdcfa81c..c40192e28149 100644 --- a/receiver/kubeletstatsreceiver/internal/kubelet/mem.go +++ b/receiver/kubeletstatsreceiver/internal/kubelet/mem.go @@ -15,13 +15,14 @@ package kubelet // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/kubelet" import ( - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata" ) -func addMemoryMetrics(dest pdata.MetricSlice, prefix string, s *stats.MemoryStats, currentTime pdata.Timestamp) { +func addMemoryMetrics(dest pmetric.MetricSlice, prefix string, s *stats.MemoryStats, currentTime pcommon.Timestamp) { if s == nil { return } diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/metrics.go b/receiver/kubeletstatsreceiver/internal/kubelet/metrics.go index bebbd15011bb..d3d25421b3d4 100644 --- a/receiver/kubeletstatsreceiver/internal/kubelet/metrics.go +++ b/receiver/kubeletstatsreceiver/internal/kubelet/metrics.go @@ -17,7 +17,7 @@ package kubelet // import "github.com/open-telemetry/opentelemetry-collector-con import ( "time" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1" ) @@ -25,7 +25,7 @@ import ( func MetricsData( logger *zap.Logger, summary *stats.Summary, metadata Metadata, typeStr string, - metricGroupsToCollect map[MetricGroup]bool) []pdata.Metrics { + metricGroupsToCollect map[MetricGroup]bool) []pmetric.Metrics { acc := &metricDataAccumulator{ metadata: metadata, logger: logger, diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/metrics_test.go 
b/receiver/kubeletstatsreceiver/internal/kubelet/metrics_test.go index e9e726fae825..52af8f7a912a 100644 --- a/receiver/kubeletstatsreceiver/internal/kubelet/metrics_test.go +++ b/receiver/kubeletstatsreceiver/internal/kubelet/metrics_test.go @@ -19,7 +19,8 @@ import ( "testing" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" ) @@ -47,7 +48,7 @@ func TestMetricAccumulator(t *testing.T) { require.Equal(t, 0, len(MetricsData(zap.NewNop(), summary, metadata, "", map[MetricGroup]bool{}))) } -func requireMetricsOk(t *testing.T, mds []pdata.Metrics) { +func requireMetricsOk(t *testing.T, mds []pmetric.Metrics) { for _, md := range mds { for i := 0; i < md.ResourceMetrics().Len(); i++ { rm := md.ResourceMetrics().At(i) @@ -62,22 +63,22 @@ func requireMetricsOk(t *testing.T, mds []pdata.Metrics) { } } -func requireMetricOk(t *testing.T, m pdata.Metric) { +func requireMetricOk(t *testing.T, m pmetric.Metric) { require.NotZero(t, m.Name()) - require.NotEqual(t, pdata.MetricDataTypeNone, m.DataType()) + require.NotEqual(t, pmetric.MetricDataTypeNone, m.DataType()) switch m.DataType() { - case pdata.MetricDataTypeGauge: + case pmetric.MetricDataTypeGauge: gauge := m.Gauge() for i := 0; i < gauge.DataPoints().Len(); i++ { dp := gauge.DataPoints().At(i) require.NotZero(t, dp.Timestamp()) requirePointOk(t, dp) } - case pdata.MetricDataTypeSum: + case pmetric.MetricDataTypeSum: sum := m.Sum() require.True(t, sum.IsMonotonic()) - require.Equal(t, pdata.MetricAggregationTemporalityCumulative, sum.AggregationTemporality()) + require.Equal(t, pmetric.MetricAggregationTemporalityCumulative, sum.AggregationTemporality()) for i := 0; i < sum.DataPoints().Len(); i++ { dp := sum.DataPoints().At(i) // Start time is required for cumulative metrics. 
Make assertions @@ -90,12 +91,12 @@ func requireMetricOk(t *testing.T, m pdata.Metric) { } } -func requirePointOk(t *testing.T, point pdata.NumberDataPoint) { +func requirePointOk(t *testing.T, point pmetric.NumberDataPoint) { require.NotZero(t, point.Timestamp()) - require.NotEqual(t, pdata.MetricValueTypeNone, point.ValueType()) + require.NotEqual(t, pmetric.MetricValueTypeNone, point.ValueType()) } -func requireResourceOk(t *testing.T, resource pdata.Resource) { +func requireResourceOk(t *testing.T, resource pcommon.Resource) { require.NotZero(t, resource.Attributes().Len()) } @@ -129,14 +130,14 @@ func TestMajorPageFaults(t *testing.T) { require.Equal(t, int64(12), value) } -func requireContains(t *testing.T, metrics map[string][]pdata.Metric, metricName string) { +func requireContains(t *testing.T, metrics map[string][]pmetric.Metric, metricName string) { _, found := metrics[metricName] require.True(t, found) } -func indexedFakeMetrics() map[string][]pdata.Metric { +func indexedFakeMetrics() map[string][]pmetric.Metric { mds := fakeMetrics() - metrics := make(map[string][]pdata.Metric) + metrics := make(map[string][]pmetric.Metric) for _, md := range mds { for i := 0; i < md.ResourceMetrics().Len(); i++ { rm := md.ResourceMetrics().At(i) @@ -155,7 +156,7 @@ func indexedFakeMetrics() map[string][]pdata.Metric { return metrics } -func fakeMetrics() []pdata.Metrics { +func fakeMetrics() []pmetric.Metrics { rc := &fakeRestClient{} statsProvider := NewStatsProvider(rc) summary, _ := statsProvider.StatsSummary() diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/network.go b/receiver/kubeletstatsreceiver/internal/kubelet/network.go index d2ab8c05026c..5d8916f74f95 100644 --- a/receiver/kubeletstatsreceiver/internal/kubelet/network.go +++ b/receiver/kubeletstatsreceiver/internal/kubelet/network.go @@ -15,13 +15,14 @@ package kubelet // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/kubelet" import ( - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata" ) -func addNetworkMetrics(dest pdata.MetricSlice, prefix string, s *stats.NetworkStats, startTime pdata.Timestamp, currentTime pdata.Timestamp) { +func addNetworkMetrics(dest pmetric.MetricSlice, prefix string, s *stats.NetworkStats, startTime pcommon.Timestamp, currentTime pcommon.Timestamp) { if s == nil { return } @@ -29,7 +30,7 @@ func addNetworkMetrics(dest pdata.MetricSlice, prefix string, s *stats.NetworkSt addNetworkErrorsMetric(dest, prefix, s, startTime, currentTime) } -func addNetworkIOMetric(dest pdata.MetricSlice, prefix string, s *stats.NetworkStats, startTime pdata.Timestamp, currentTime pdata.Timestamp) { +func addNetworkIOMetric(dest pmetric.MetricSlice, prefix string, s *stats.NetworkStats, startTime pcommon.Timestamp, currentTime pcommon.Timestamp) { if s.RxBytes == nil && s.TxBytes == nil { return } @@ -42,7 +43,7 @@ func addNetworkIOMetric(dest pdata.MetricSlice, prefix string, s *stats.NetworkS fillNetworkDataPoint(m.Sum().DataPoints(), s.Name, metadata.AttributeDirection.Transmit, s.TxBytes, startTime, currentTime) } -func addNetworkErrorsMetric(dest pdata.MetricSlice, prefix string, s *stats.NetworkStats, startTime pdata.Timestamp, currentTime pdata.Timestamp) { +func addNetworkErrorsMetric(dest 
pmetric.MetricSlice, prefix string, s *stats.NetworkStats, startTime pcommon.Timestamp, currentTime pcommon.Timestamp) { if s.RxBytes == nil && s.TxBytes == nil { return } @@ -55,7 +56,7 @@ func addNetworkErrorsMetric(dest pdata.MetricSlice, prefix string, s *stats.Netw fillNetworkDataPoint(m.Sum().DataPoints(), s.Name, metadata.AttributeDirection.Transmit, s.TxErrors, startTime, currentTime) } -func fillNetworkDataPoint(dps pdata.NumberDataPointSlice, interfaceName string, direction string, value *uint64, startTime pdata.Timestamp, currentTime pdata.Timestamp) { +func fillNetworkDataPoint(dps pmetric.NumberDataPointSlice, interfaceName string, direction string, value *uint64, startTime pcommon.Timestamp, currentTime pcommon.Timestamp) { if value == nil { return } diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/resource.go b/receiver/kubeletstatsreceiver/internal/kubelet/resource.go index 66c91815f335..22cb730b7149 100644 --- a/receiver/kubeletstatsreceiver/internal/kubelet/resource.go +++ b/receiver/kubeletstatsreceiver/internal/kubelet/resource.go @@ -17,22 +17,22 @@ package kubelet // import "github.com/open-telemetry/opentelemetry-collector-con import ( "fmt" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1" ) -func fillNodeResource(dest pdata.Resource, s stats.NodeStats) { +func fillNodeResource(dest pcommon.Resource, s stats.NodeStats) { dest.Attributes().UpsertString(conventions.AttributeK8SNodeName, s.NodeName) } -func fillPodResource(dest pdata.Resource, s stats.PodStats) { +func fillPodResource(dest pcommon.Resource, s stats.PodStats) { dest.Attributes().UpsertString(conventions.AttributeK8SPodUID, s.PodRef.UID) dest.Attributes().UpsertString(conventions.AttributeK8SPodName, s.PodRef.Name) dest.Attributes().UpsertString(conventions.AttributeK8SNamespaceName, s.PodRef.Namespace) } -func fillContainerResource(dest pdata.Resource, sPod stats.PodStats, sContainer stats.ContainerStats, metadata Metadata) error { +func fillContainerResource(dest pcommon.Resource, sPod stats.PodStats, sContainer stats.ContainerStats, metadata Metadata) error { labels := map[string]string{ conventions.AttributeK8SPodUID: sPod.PodRef.UID, conventions.AttributeK8SPodName: sPod.PodRef.Name, @@ -48,7 +48,7 @@ func fillContainerResource(dest pdata.Resource, sPod stats.PodStats, sContainer return nil } -func fillVolumeResource(dest pdata.Resource, sPod stats.PodStats, vs stats.VolumeStats, metadata Metadata) error { +func fillVolumeResource(dest pcommon.Resource, sPod stats.PodStats, vs stats.VolumeStats, metadata Metadata) error { labels := map[string]string{ conventions.AttributeK8SPodUID: sPod.PodRef.UID, conventions.AttributeK8SPodName: sPod.PodRef.Name, diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/utils.go b/receiver/kubeletstatsreceiver/internal/kubelet/utils.go index 7784be59d05a..2b362e0dbfa3 100644 --- a/receiver/kubeletstatsreceiver/internal/kubelet/utils.go +++ b/receiver/kubeletstatsreceiver/internal/kubelet/utils.go @@ -15,12 +15,13 @@ package kubelet // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/kubelet" import ( - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" 
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata" ) -func fillDoubleGauge(dest pdata.Metric, prefix string, metricInt metadata.MetricIntf, value float64, currentTime pdata.Timestamp) { +func fillDoubleGauge(dest pmetric.Metric, prefix string, metricInt metadata.MetricIntf, value float64, currentTime pcommon.Timestamp) { metricInt.Init(dest) dest.SetName(prefix + dest.Name()) dp := dest.Gauge().DataPoints().AppendEmpty() @@ -28,14 +29,14 @@ func fillDoubleGauge(dest pdata.Metric, prefix string, metricInt metadata.Metric dp.SetTimestamp(currentTime) } -func addIntGauge(dest pdata.MetricSlice, prefix string, metricInt metadata.MetricIntf, value *uint64, currentTime pdata.Timestamp) { +func addIntGauge(dest pmetric.MetricSlice, prefix string, metricInt metadata.MetricIntf, value *uint64, currentTime pcommon.Timestamp) { if value == nil { return } fillIntGauge(dest.AppendEmpty(), prefix, metricInt, int64(*value), currentTime) } -func fillIntGauge(dest pdata.Metric, prefix string, metricInt metadata.MetricIntf, value int64, currentTime pdata.Timestamp) { +func fillIntGauge(dest pmetric.Metric, prefix string, metricInt metadata.MetricIntf, value int64, currentTime pcommon.Timestamp) { metricInt.Init(dest) dest.SetName(prefix + dest.Name()) dp := dest.Gauge().DataPoints().AppendEmpty() @@ -43,7 +44,7 @@ func fillIntGauge(dest pdata.Metric, prefix string, metricInt metadata.MetricInt dp.SetTimestamp(currentTime) } -func fillDoubleSum(dest pdata.Metric, prefix string, metricInt metadata.MetricIntf, value float64, startTime pdata.Timestamp, currentTime pdata.Timestamp) { +func fillDoubleSum(dest pmetric.Metric, prefix string, metricInt metadata.MetricIntf, value float64, startTime pcommon.Timestamp, currentTime pcommon.Timestamp) { metricInt.Init(dest) dest.SetName(prefix + dest.Name()) dp := dest.Sum().DataPoints().AppendEmpty() diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/volume.go b/receiver/kubeletstatsreceiver/internal/kubelet/volume.go index c13f97074be1..b64962e367f9 100644 --- a/receiver/kubeletstatsreceiver/internal/kubelet/volume.go +++ b/receiver/kubeletstatsreceiver/internal/kubelet/volume.go @@ -17,14 +17,15 @@ package kubelet // import "github.com/open-telemetry/opentelemetry-collector-con import ( "strconv" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" v1 "k8s.io/api/core/v1" stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver/internal/metadata" ) -func addVolumeMetrics(dest pdata.MetricSlice, prefix string, s stats.VolumeStats, currentTime pdata.Timestamp) { +func addVolumeMetrics(dest pmetric.MetricSlice, prefix string, s stats.VolumeStats, currentTime pcommon.Timestamp) { addIntGauge(dest, prefix, metadata.M.VolumeAvailable, s.AvailableBytes, currentTime) addIntGauge(dest, prefix, metadata.M.VolumeCapacity, s.CapacityBytes, currentTime) addIntGauge(dest, prefix, metadata.M.VolumeInodes, s.Inodes, currentTime) diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/volume_test.go b/receiver/kubeletstatsreceiver/internal/kubelet/volume_test.go index 9d917a7efbcd..dcb0c42577ff 100644 --- a/receiver/kubeletstatsreceiver/internal/kubelet/volume_test.go +++ b/receiver/kubeletstatsreceiver/internal/kubelet/volume_test.go @@ -18,7 +18,7 @@ import ( "testing" "github.com/stretchr/testify/require" - 
"go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -189,10 +189,10 @@ func TestDetailedPVCLabels(t *testing.T) { }, nil) metadata.DetailedPVCLabelsSetter = tt.detailedPVCLabelsSetterOverride - volumeResource := pdata.NewResource() + volumeResource := pcommon.NewResource() err := fillVolumeResource(volumeResource, podStats, stats.VolumeStats{Name: tt.volumeName}, metadata) require.NoError(t, err) - require.Equal(t, pdata.NewMapFromRaw(tt.want).Sort(), volumeResource.Attributes().Sort()) + require.Equal(t, pcommon.NewMapFromRaw(tt.want).Sort(), volumeResource.Attributes().Sort()) }) } } diff --git a/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics.go b/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics.go index 5c63b14f0215..9b7b5e88bdbf 100644 --- a/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics.go +++ b/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics.go @@ -4,7 +4,7 @@ package metadata import ( "go.opentelemetry.io/collector/config" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" ) // Type is the component type name. @@ -13,14 +13,14 @@ const Type config.Type = "kubeletstatsreceiver" // MetricIntf is an interface to generically interact with generated metric. type MetricIntf interface { Name() string - New() pdata.Metric - Init(metric pdata.Metric) + New() pmetric.Metric + Init(metric pmetric.Metric) } // Intentionally not exposing this so that it is opaque and can change freely. type metricImpl struct { name string - initFunc func(pdata.Metric) + initFunc func(pmetric.Metric) } // Name returns the metric name. @@ -29,14 +29,14 @@ func (m *metricImpl) Name() string { } // New creates a metric object preinitialized. -func (m *metricImpl) New() pdata.Metric { - metric := pdata.NewMetric() +func (m *metricImpl) New() pmetric.Metric { + metric := pmetric.NewMetric() m.Init(metric) return metric } // Init initializes the provided metric object. 
-func (m *metricImpl) Init(metric pdata.Metric) { +func (m *metricImpl) Init(metric pmetric.Metric) { m.initFunc(metric) } @@ -115,170 +115,170 @@ func (m *metricStruct) ByName(n string) MetricIntf { var Metrics = &metricStruct{ &metricImpl{ "cpu.time", - func(metric pdata.Metric) { + func(metric pmetric.Metric) { metric.SetName("cpu.time") metric.SetDescription("CPU time") metric.SetUnit("s") - metric.SetDataType(pdata.MetricDataTypeSum) + metric.SetDataType(pmetric.MetricDataTypeSum) metric.Sum().SetIsMonotonic(true) - metric.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + metric.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) }, }, &metricImpl{ "cpu.utilization", - func(metric pdata.Metric) { + func(metric pmetric.Metric) { metric.SetName("cpu.utilization") metric.SetDescription("CPU utilization") metric.SetUnit("1") - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) }, }, &metricImpl{ "filesystem.available", - func(metric pdata.Metric) { + func(metric pmetric.Metric) { metric.SetName("filesystem.available") metric.SetDescription("Filesystem available") metric.SetUnit("By") - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) }, }, &metricImpl{ "filesystem.capacity", - func(metric pdata.Metric) { + func(metric pmetric.Metric) { metric.SetName("filesystem.capacity") metric.SetDescription("Filesystem capacity") metric.SetUnit("By") - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) }, }, &metricImpl{ "filesystem.usage", - func(metric pdata.Metric) { + func(metric pmetric.Metric) { metric.SetName("filesystem.usage") metric.SetDescription("Filesystem usage") metric.SetUnit("By") - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) }, }, &metricImpl{ "memory.available", - func(metric pdata.Metric) { + func(metric pmetric.Metric) { metric.SetName("memory.available") metric.SetDescription("Memory available") metric.SetUnit("By") - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) }, }, &metricImpl{ "memory.major_page_faults", - func(metric pdata.Metric) { + func(metric pmetric.Metric) { metric.SetName("memory.major_page_faults") metric.SetDescription("Memory major_page_faults") metric.SetUnit("1") - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) }, }, &metricImpl{ "memory.page_faults", - func(metric pdata.Metric) { + func(metric pmetric.Metric) { metric.SetName("memory.page_faults") metric.SetDescription("Memory page_faults") metric.SetUnit("1") - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) }, }, &metricImpl{ "memory.rss", - func(metric pdata.Metric) { + func(metric pmetric.Metric) { metric.SetName("memory.rss") metric.SetDescription("Memory rss") metric.SetUnit("By") - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) }, }, &metricImpl{ "memory.usage", - func(metric pdata.Metric) { + func(metric pmetric.Metric) { metric.SetName("memory.usage") metric.SetDescription("Memory usage") metric.SetUnit("By") - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) }, }, &metricImpl{ "memory.working_set", - func(metric pdata.Metric) { + func(metric pmetric.Metric) { metric.SetName("memory.working_set") 
metric.SetDescription("Memory working_set") metric.SetUnit("By") - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) }, }, &metricImpl{ "network.errors", - func(metric pdata.Metric) { + func(metric pmetric.Metric) { metric.SetName("network.errors") metric.SetDescription("Network errors") metric.SetUnit("1") - metric.SetDataType(pdata.MetricDataTypeSum) + metric.SetDataType(pmetric.MetricDataTypeSum) metric.Sum().SetIsMonotonic(true) - metric.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + metric.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) }, }, &metricImpl{ "network.io", - func(metric pdata.Metric) { + func(metric pmetric.Metric) { metric.SetName("network.io") metric.SetDescription("Network IO") metric.SetUnit("By") - metric.SetDataType(pdata.MetricDataTypeSum) + metric.SetDataType(pmetric.MetricDataTypeSum) metric.Sum().SetIsMonotonic(true) - metric.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + metric.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) }, }, &metricImpl{ "volume.available", - func(metric pdata.Metric) { + func(metric pmetric.Metric) { metric.SetName("volume.available") metric.SetDescription("The number of available bytes in the volume.") metric.SetUnit("By") - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) }, }, &metricImpl{ "volume.capacity", - func(metric pdata.Metric) { + func(metric pmetric.Metric) { metric.SetName("volume.capacity") metric.SetDescription("The total capacity in bytes of the volume.") metric.SetUnit("By") - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) }, }, &metricImpl{ "volume.inodes", - func(metric pdata.Metric) { + func(metric pmetric.Metric) { metric.SetName("volume.inodes") metric.SetDescription("The total inodes in the filesystem.") metric.SetUnit("1") - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) }, }, &metricImpl{ "volume.inodes.free", - func(metric pdata.Metric) { + func(metric pmetric.Metric) { metric.SetName("volume.inodes.free") metric.SetDescription("The free inodes in the filesystem.") metric.SetUnit("1") - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) }, }, &metricImpl{ "volume.inodes.used", - func(metric pdata.Metric) { + func(metric pmetric.Metric) { metric.SetName("volume.inodes.used") metric.SetDescription("The inodes used by the filesystem. 
This may not equal inodes - free because filesystem may share inodes with other filesystems.") metric.SetUnit("1") - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) }, }, } diff --git a/receiver/kubeletstatsreceiver/scraper.go b/receiver/kubeletstatsreceiver/scraper.go index 41eb16b62f73..cca541c2741f 100644 --- a/receiver/kubeletstatsreceiver/scraper.go +++ b/receiver/kubeletstatsreceiver/scraper.go @@ -21,7 +21,7 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/receiver/scraperhelper" "go.uber.org/zap" v1 "k8s.io/api/core/v1" @@ -66,11 +66,11 @@ func newKubletScraper( return scraperhelper.NewScraper(typeStr, ks.scrape) } -func (r *kubletScraper) scrape(context.Context) (pdata.Metrics, error) { +func (r *kubletScraper) scrape(context.Context) (pmetric.Metrics, error) { summary, err := r.statsProvider.StatsSummary() if err != nil { r.logger.Error("call to /stats/summary endpoint failed", zap.Error(err)) - return pdata.Metrics{}, err + return pmetric.Metrics{}, err } var podsMetadata *v1.PodList @@ -79,13 +79,13 @@ func (r *kubletScraper) scrape(context.Context) (pdata.Metrics, error) { podsMetadata, err = r.metadataProvider.Pods() if err != nil { r.logger.Error("call to /pods endpoint failed", zap.Error(err)) - return pdata.Metrics{}, err + return pmetric.Metrics{}, err } } metadata := kubelet.NewMetadata(r.extraMetadataLabels, podsMetadata, r.detailedPVCLabelsSetter()) mds := kubelet.MetricsData(r.logger, summary, metadata, typeStr, r.metricGroupsToCollect) - md := pdata.NewMetrics() + md := pmetric.NewMetrics() for i := range mds { mds[i].ResourceMetrics().MoveAndAppendTo(md.ResourceMetrics()) } diff --git a/receiver/kubeletstatsreceiver/scraper_test.go b/receiver/kubeletstatsreceiver/scraper_test.go index 1a9a80cb428c..f49fda152d19 100644 --- a/receiver/kubeletstatsreceiver/scraper_test.go +++ b/receiver/kubeletstatsreceiver/scraper_test.go @@ -23,7 +23,7 @@ import ( "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component/componenttest" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" "go.uber.org/zap" "go.uber.org/zap/zaptest/observer" "k8s.io/client-go/kubernetes" @@ -379,7 +379,7 @@ func TestScraperWithPVCDetailedLabels(t *testing.T) { } } -func requireExpectedVolume(t *testing.T, ev expectedVolume, resource pdata.Resource) { +func requireExpectedVolume(t *testing.T, ev expectedVolume, resource pcommon.Resource) { require.NotNil(t, ev) requireAttribute(t, resource.Attributes(), "k8s.volume.name", ev.name) @@ -389,7 +389,7 @@ func requireExpectedVolume(t *testing.T, ev expectedVolume, resource pdata.Resou } } -func requireAttribute(t *testing.T, attr pdata.Map, key string, value string) { +func requireAttribute(t *testing.T, attr pcommon.Map, key string, value string) { val, ok := attr.Get(key) require.True(t, ok) require.Equal(t, value, val.StringVal()) diff --git a/receiver/memcachedreceiver/go.mod b/receiver/memcachedreceiver/go.mod index 236cf0fbdf61..f9debd36487a 100644 --- a/receiver/memcachedreceiver/go.mod +++ b/receiver/memcachedreceiver/go.mod @@ -7,8 +7,8 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/containertest v0.48.0 github.com/open-telemetry/opentelemetry-collector-contrib/internal/scrapertest v0.48.0 
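
In generated_metrics.go above, every initFunc keeps its behaviour and only trades pdata identifiers for pmetric ones. A condensed, hand-written sketch of that init pattern, not the generated code itself (only the cpu.time entry is reproduced):

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

// initCPUTime mirrors the generated initializer for cpu.time: a monotonic,
// cumulative Sum, now expressed with pmetric data-type and temporality constants.
func initCPUTime(metric pmetric.Metric) {
	metric.SetName("cpu.time")
	metric.SetDescription("CPU time")
	metric.SetUnit("s")
	metric.SetDataType(pmetric.MetricDataTypeSum)
	metric.Sum().SetIsMonotonic(true)
	metric.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
}

func main() {
	m := pmetric.NewMetric()
	initCPUTime(m)
	fmt.Println(m.Name(), m.DataType() == pmetric.MetricDataTypeSum) // cpu.time true
}
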
github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/zap v1.21.0 ) @@ -25,7 +25,7 @@ require ( github.com/fsnotify/fsnotify v1.5.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.2 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/text v0.2.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect @@ -37,16 +37,13 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rogpeppe/go-internal v1.6.1 // indirect github.com/sirupsen/logrus v1.8.1 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect - golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect - golang.org/x/text v0.3.7 // indirect + golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 // indirect google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect google.golang.org/grpc v1.45.0 // indirect google.golang.org/protobuf v1.28.0 // indirect @@ -61,3 +58,5 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/scrap // see https://github.com/distribution/distribution/issues/3590 exclude github.com/docker/distribution v2.8.0+incompatible + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/receiver/memcachedreceiver/go.sum b/receiver/memcachedreceiver/go.sum index f7bff56c210a..2be6bc8bccc9 100644 --- a/receiver/memcachedreceiver/go.sum +++ b/receiver/memcachedreceiver/go.sum @@ -454,8 +454,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -634,8 +634,6 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasO github.com/spf13/afero v1.1.2/go.mod 
h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= @@ -702,15 +700,15 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= @@ -808,8 +806,7 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= 
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -893,8 +890,8 @@ golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -905,7 +902,6 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= diff --git a/receiver/memcachedreceiver/integration_test.go b/receiver/memcachedreceiver/integration_test.go index b5e810f9b4c6..7ec0427144bb 100644 --- a/receiver/memcachedreceiver/integration_test.go +++ b/receiver/memcachedreceiver/integration_test.go @@ -26,7 +26,7 @@ import ( "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/consumer/consumertest" - "go.opentelemetry.io/collector/model/otlp" + "go.opentelemetry.io/collector/pdata/pmetric" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/containertest" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/scrapertest" @@ -55,7 +55,7 @@ func TestIntegration(t *testing.T) { expectedFileBytes, err := ioutil.ReadFile("./testdata/expected_metrics/test_scraper/expected.json") require.NoError(t, err) - unmarshaller := otlp.NewJSONMetricsUnmarshaler() + unmarshaller := pmetric.NewJSONUnmarshaler() expectedMetrics, err := unmarshaller.UnmarshalMetrics(expectedFileBytes) require.NoError(t, err) diff --git a/receiver/memcachedreceiver/internal/metadata/generated_metrics.go b/receiver/memcachedreceiver/internal/metadata/generated_metrics.go index 3398eacf6ad1..241f5266bcfd 100644 --- a/receiver/memcachedreceiver/internal/metadata/generated_metrics.go +++ b/receiver/memcachedreceiver/internal/metadata/generated_metrics.go @@ -4,7 +4,7 @@ package metadata import ( 
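The memcachedreceiver integration test change above captures the unmarshaler move: otlp.NewJSONMetricsUnmarshaler from collector/model becomes pmetric.NewJSONUnmarshaler. A small sketch of loading golden metrics with the new API (loadExpectedMetrics and the path are illustrative, not this repository's helpers):

import (
	"io/ioutil"
	"testing"

	"github.com/stretchr/testify/require"
	"go.opentelemetry.io/collector/pdata/pmetric"
)

// loadExpectedMetrics reads a JSON-encoded golden file and decodes it into pmetric.Metrics.
func loadExpectedMetrics(t *testing.T, path string) pmetric.Metrics {
	raw, err := ioutil.ReadFile(path)
	require.NoError(t, err)
	md, err := pmetric.NewJSONUnmarshaler().UnmarshalMetrics(raw)
	require.NoError(t, err)
	return md
}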
"go.opentelemetry.io/collector/config" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" ) // Type is the component type name. @@ -13,14 +13,14 @@ const Type config.Type = "memcachedreceiver" // MetricIntf is an interface to generically interact with generated metric. type MetricIntf interface { Name() string - New() pdata.Metric - Init(metric pdata.Metric) + New() pmetric.Metric + Init(metric pmetric.Metric) } // Intentionally not exposing this so that it is opaque and can change freely. type metricImpl struct { name string - initFunc func(pdata.Metric) + initFunc func(pmetric.Metric) } // Name returns the metric name. @@ -29,14 +29,14 @@ func (m *metricImpl) Name() string { } // New creates a metric object preinitialized. -func (m *metricImpl) New() pdata.Metric { - metric := pdata.NewMetric() +func (m *metricImpl) New() pmetric.Metric { + metric := pmetric.NewMetric() m.Init(metric) return metric } // Init initializes the provided metric object. -func (m *metricImpl) Init(metric pdata.Metric) { +func (m *metricImpl) Init(metric pmetric.Metric) { m.initFunc(metric) } @@ -94,119 +94,119 @@ func (m *metricStruct) ByName(n string) MetricIntf { var Metrics = &metricStruct{ &metricImpl{ "memcached.bytes", - func(metric pdata.Metric) { + func(metric pmetric.Metric) { metric.SetName("memcached.bytes") metric.SetDescription("Current number of bytes used by this server to store items.") metric.SetUnit("By") - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) }, }, &metricImpl{ "memcached.commands", - func(metric pdata.Metric) { + func(metric pmetric.Metric) { metric.SetName("memcached.commands") metric.SetDescription("Commands executed.") metric.SetUnit("{commands}") - metric.SetDataType(pdata.MetricDataTypeSum) + metric.SetDataType(pmetric.MetricDataTypeSum) metric.Sum().SetIsMonotonic(true) - metric.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + metric.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) }, }, &metricImpl{ "memcached.connections.current", - func(metric pdata.Metric) { + func(metric pmetric.Metric) { metric.SetName("memcached.connections.current") metric.SetDescription("The current number of open connections.") metric.SetUnit("{connections}") - metric.SetDataType(pdata.MetricDataTypeSum) + metric.SetDataType(pmetric.MetricDataTypeSum) metric.Sum().SetIsMonotonic(false) - metric.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + metric.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) }, }, &metricImpl{ "memcached.connections.total", - func(metric pdata.Metric) { + func(metric pmetric.Metric) { metric.SetName("memcached.connections.total") metric.SetDescription("Total number of connections opened since the server started running.") metric.SetUnit("{connections}") - metric.SetDataType(pdata.MetricDataTypeSum) + metric.SetDataType(pmetric.MetricDataTypeSum) metric.Sum().SetIsMonotonic(true) - metric.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + metric.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) }, }, &metricImpl{ "memcached.cpu.usage", - func(metric pdata.Metric) { + func(metric pmetric.Metric) { metric.SetName("memcached.cpu.usage") metric.SetDescription("Accumulated user and system time.") metric.SetUnit("s") - metric.SetDataType(pdata.MetricDataTypeSum) + metric.SetDataType(pmetric.MetricDataTypeSum) 
metric.Sum().SetIsMonotonic(true) - metric.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + metric.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) }, }, &metricImpl{ "memcached.current_items", - func(metric pdata.Metric) { + func(metric pmetric.Metric) { metric.SetName("memcached.current_items") metric.SetDescription("Number of items currently stored in the cache.") metric.SetUnit("{items}") - metric.SetDataType(pdata.MetricDataTypeSum) + metric.SetDataType(pmetric.MetricDataTypeSum) metric.Sum().SetIsMonotonic(false) - metric.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + metric.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) }, }, &metricImpl{ "memcached.evictions", - func(metric pdata.Metric) { + func(metric pmetric.Metric) { metric.SetName("memcached.evictions") metric.SetDescription("Cache item evictions.") metric.SetUnit("{evictions}") - metric.SetDataType(pdata.MetricDataTypeSum) + metric.SetDataType(pmetric.MetricDataTypeSum) metric.Sum().SetIsMonotonic(true) - metric.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + metric.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) }, }, &metricImpl{ "memcached.network", - func(metric pdata.Metric) { + func(metric pmetric.Metric) { metric.SetName("memcached.network") metric.SetDescription("Bytes transferred over the network.") metric.SetUnit("by") - metric.SetDataType(pdata.MetricDataTypeSum) + metric.SetDataType(pmetric.MetricDataTypeSum) metric.Sum().SetIsMonotonic(true) - metric.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + metric.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) }, }, &metricImpl{ "memcached.operation_hit_ratio", - func(metric pdata.Metric) { + func(metric pmetric.Metric) { metric.SetName("memcached.operation_hit_ratio") metric.SetDescription("Hit ratio for operations, expressed as a percentage value between 0.0 and 100.0.") metric.SetUnit("%") - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) }, }, &metricImpl{ "memcached.operations", - func(metric pdata.Metric) { + func(metric pmetric.Metric) { metric.SetName("memcached.operations") metric.SetDescription("Operation counts.") metric.SetUnit("{operations}") - metric.SetDataType(pdata.MetricDataTypeSum) + metric.SetDataType(pmetric.MetricDataTypeSum) metric.Sum().SetIsMonotonic(true) - metric.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + metric.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) }, }, &metricImpl{ "memcached.threads", - func(metric pdata.Metric) { + func(metric pmetric.Metric) { metric.SetName("memcached.threads") metric.SetDescription("Number of threads used by the memcached instance.") metric.SetUnit("{threads}") - metric.SetDataType(pdata.MetricDataTypeSum) + metric.SetDataType(pmetric.MetricDataTypeSum) metric.Sum().SetIsMonotonic(false) - metric.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + metric.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) }, }, } diff --git a/receiver/memcachedreceiver/scraper.go b/receiver/memcachedreceiver/scraper.go index 72844a93a7d3..3673219bb670 100644 --- a/receiver/memcachedreceiver/scraper.go +++ b/receiver/memcachedreceiver/scraper.go @@ -19,7 +19,8 @@ import ( "strconv" "time" - 
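The generated_metrics.go hunks above are the mechanical bulk of the change: generated initializers now take pmetric.Metric and use pmetric constants for data type and aggregation temporality, with no behavior change. A sketch of one such initializer, using an illustrative metric name:

import "go.opentelemetry.io/collector/pdata/pmetric"

// initExampleSum mirrors the generated init funcs: a cumulative, monotonic sum.
func initExampleSum(metric pmetric.Metric) {
	metric.SetName("example.requests")
	metric.SetDescription("Requests handled.")
	metric.SetUnit("{requests}")
	metric.SetDataType(pmetric.MetricDataTypeSum)
	metric.Sum().SetIsMonotonic(true)
	metric.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
}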
"go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/memcachedreceiver/internal/metadata" @@ -42,23 +43,23 @@ func newMemcachedScraper( } } -func (r *memcachedScraper) scrape(_ context.Context) (pdata.Metrics, error) { +func (r *memcachedScraper) scrape(_ context.Context) (pmetric.Metrics, error) { // Init client in scrape method in case there are transient errors in the // constructor. statsClient, err := r.newClient(r.config.Endpoint, r.config.Timeout) if err != nil { r.logger.Error("Failed to estalbish client", zap.Error(err)) - return pdata.Metrics{}, err + return pmetric.Metrics{}, err } allServerStats, err := statsClient.Stats() if err != nil { r.logger.Error("Failed to fetch memcached stats", zap.Error(err)) - return pdata.Metrics{}, err + return pmetric.Metrics{}, err } - now := pdata.NewTimestampFromTime(time.Now()) - md := pdata.NewMetrics() + now := pcommon.NewTimestampFromTime(time.Now()) + md := pmetric.NewMetrics() ilm := md.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty() ilm.Scope().SetName("otelcol/memcached") @@ -76,7 +77,7 @@ func (r *memcachedScraper) scrape(_ context.Context) (pdata.Metrics, error) { for _, stats := range allServerStats { for k, v := range stats.Stats { - attributes := pdata.NewMap() + attributes := pcommon.NewMap() switch k { case "bytes": if parsedV, ok := r.parseInt(k, v); ok { @@ -91,22 +92,22 @@ func (r *memcachedScraper) scrape(_ context.Context) (pdata.Metrics, error) { r.addToIntMetric(totalConn, attributes, parsedV, now) } case "cmd_get": - attributes.Insert(metadata.A.Command, pdata.NewValueString("get")) + attributes.Insert(metadata.A.Command, pcommon.NewValueString("get")) if parsedV, ok := r.parseInt(k, v); ok { r.addToIntMetric(commandCount, attributes, parsedV, now) } case "cmd_set": - attributes.Insert(metadata.A.Command, pdata.NewValueString("set")) + attributes.Insert(metadata.A.Command, pcommon.NewValueString("set")) if parsedV, ok := r.parseInt(k, v); ok { r.addToIntMetric(commandCount, attributes, parsedV, now) } case "cmd_flush": - attributes.Insert(metadata.A.Command, pdata.NewValueString("flush")) + attributes.Insert(metadata.A.Command, pcommon.NewValueString("flush")) if parsedV, ok := r.parseInt(k, v); ok { r.addToIntMetric(commandCount, attributes, parsedV, now) } case "cmd_touch": - attributes.Insert(metadata.A.Command, pdata.NewValueString("touch")) + attributes.Insert(metadata.A.Command, pcommon.NewValueString("touch")) if parsedV, ok := r.parseInt(k, v); ok { r.addToIntMetric(commandCount, attributes, parsedV, now) } @@ -125,59 +126,59 @@ func (r *memcachedScraper) scrape(_ context.Context) (pdata.Metrics, error) { r.addToIntMetric(evictions, attributes, parsedV, now) } case "bytes_read": - attributes.Insert(metadata.A.Direction, pdata.NewValueString("received")) + attributes.Insert(metadata.A.Direction, pcommon.NewValueString("received")) if parsedV, ok := r.parseInt(k, v); ok { r.addToIntMetric(network, attributes, parsedV, now) } case "bytes_written": - attributes.Insert(metadata.A.Direction, pdata.NewValueString("sent")) + attributes.Insert(metadata.A.Direction, pcommon.NewValueString("sent")) if parsedV, ok := r.parseInt(k, v); ok { r.addToIntMetric(network, attributes, parsedV, now) } case "get_hits": - attributes.Insert(metadata.A.Operation, pdata.NewValueString("get")) - attributes.Insert(metadata.A.Type, 
pdata.NewValueString("hit")) + attributes.Insert(metadata.A.Operation, pcommon.NewValueString("get")) + attributes.Insert(metadata.A.Type, pcommon.NewValueString("hit")) if parsedV, ok := r.parseInt(k, v); ok { r.addToIntMetric(operationCount, attributes, parsedV, now) } case "get_misses": - attributes.Insert(metadata.A.Operation, pdata.NewValueString("get")) - attributes.Insert(metadata.A.Type, pdata.NewValueString("miss")) + attributes.Insert(metadata.A.Operation, pcommon.NewValueString("get")) + attributes.Insert(metadata.A.Type, pcommon.NewValueString("miss")) if parsedV, ok := r.parseInt(k, v); ok { r.addToIntMetric(operationCount, attributes, parsedV, now) } case "incr_hits": - attributes.Insert(metadata.A.Operation, pdata.NewValueString("increment")) - attributes.Insert(metadata.A.Type, pdata.NewValueString("hit")) + attributes.Insert(metadata.A.Operation, pcommon.NewValueString("increment")) + attributes.Insert(metadata.A.Type, pcommon.NewValueString("hit")) if parsedV, ok := r.parseInt(k, v); ok { r.addToIntMetric(operationCount, attributes, parsedV, now) } case "incr_misses": - attributes.Insert(metadata.A.Operation, pdata.NewValueString("increment")) - attributes.Insert(metadata.A.Type, pdata.NewValueString("miss")) + attributes.Insert(metadata.A.Operation, pcommon.NewValueString("increment")) + attributes.Insert(metadata.A.Type, pcommon.NewValueString("miss")) if parsedV, ok := r.parseInt(k, v); ok { r.addToIntMetric(operationCount, attributes, parsedV, now) } case "decr_hits": - attributes.Insert(metadata.A.Operation, pdata.NewValueString("decrement")) - attributes.Insert(metadata.A.Type, pdata.NewValueString("hit")) + attributes.Insert(metadata.A.Operation, pcommon.NewValueString("decrement")) + attributes.Insert(metadata.A.Type, pcommon.NewValueString("hit")) if parsedV, ok := r.parseInt(k, v); ok { r.addToIntMetric(operationCount, attributes, parsedV, now) } case "decr_misses": - attributes.Insert(metadata.A.Operation, pdata.NewValueString("decrement")) - attributes.Insert(metadata.A.Type, pdata.NewValueString("miss")) + attributes.Insert(metadata.A.Operation, pcommon.NewValueString("decrement")) + attributes.Insert(metadata.A.Type, pcommon.NewValueString("miss")) if parsedV, ok := r.parseInt(k, v); ok { r.addToIntMetric(operationCount, attributes, parsedV, now) } case "rusage_system": - attributes.Insert(metadata.A.State, pdata.NewValueString("system")) + attributes.Insert(metadata.A.State, pcommon.NewValueString("system")) if parsedV, ok := r.parseFloat(k, v); ok { r.addToDoubleMetric(rUsage, attributes, parsedV, now) } case "rusage_user": - attributes.Insert(metadata.A.State, pdata.NewValueString("user")) + attributes.Insert(metadata.A.State, pcommon.NewValueString("user")) if parsedV, ok := r.parseFloat(k, v); ok { r.addToDoubleMetric(rUsage, attributes, parsedV, now) } @@ -185,24 +186,24 @@ func (r *memcachedScraper) scrape(_ context.Context) (pdata.Metrics, error) { } // Calculated Metrics - attributes := pdata.NewMap() - attributes.Insert(metadata.A.Operation, pdata.NewValueString("increment")) + attributes := pcommon.NewMap() + attributes.Insert(metadata.A.Operation, pcommon.NewValueString("increment")) parsedHit, okHit := r.parseInt("incr_hits", stats.Stats["incr_hits"]) parsedMiss, okMiss := r.parseInt("incr_misses", stats.Stats["incr_misses"]) if okHit && okMiss { r.addToDoubleMetric(hitRatio, attributes, calculateHitRatio(parsedHit, parsedMiss), now) } - attributes = pdata.NewMap() - attributes.Insert(metadata.A.Operation, pdata.NewValueString("decrement")) + 
attributes = pcommon.NewMap() + attributes.Insert(metadata.A.Operation, pcommon.NewValueString("decrement")) parsedHit, okHit = r.parseInt("decr_hits", stats.Stats["decr_hits"]) parsedMiss, okMiss = r.parseInt("decr_misses", stats.Stats["decr_misses"]) if okHit && okMiss { r.addToDoubleMetric(hitRatio, attributes, calculateHitRatio(parsedHit, parsedMiss), now) } - attributes = pdata.NewMap() - attributes.Insert(metadata.A.Operation, pdata.NewValueString("get")) + attributes = pcommon.NewMap() + attributes.Insert(metadata.A.Operation, pcommon.NewValueString("get")) parsedHit, okHit = r.parseInt("get_hits", stats.Stats["get_hits"]) parsedMiss, okMiss = r.parseInt("get_misses", stats.Stats["get_misses"]) if okHit && okMiss { @@ -212,7 +213,7 @@ func (r *memcachedScraper) scrape(_ context.Context) (pdata.Metrics, error) { return md, nil } -func initMetric(ms pdata.MetricSlice, mi metadata.MetricIntf) pdata.Metric { +func initMetric(ms pmetric.MetricSlice, mi metadata.MetricIntf) pmetric.Metric { m := ms.AppendEmpty() mi.Init(m) return m @@ -256,7 +257,7 @@ func (r *memcachedScraper) logInvalid(expectedType, key, value string) { ) } -func (r *memcachedScraper) addToDoubleMetric(metric pdata.NumberDataPointSlice, attributes pdata.Map, value float64, now pdata.Timestamp) { +func (r *memcachedScraper) addToDoubleMetric(metric pmetric.NumberDataPointSlice, attributes pcommon.Map, value float64, now pcommon.Timestamp) { dataPoint := metric.AppendEmpty() dataPoint.SetTimestamp(now) dataPoint.SetDoubleVal(value) @@ -265,7 +266,7 @@ func (r *memcachedScraper) addToDoubleMetric(metric pdata.NumberDataPointSlice, } } -func (r *memcachedScraper) addToIntMetric(metric pdata.NumberDataPointSlice, attributes pdata.Map, value int64, now pdata.Timestamp) { +func (r *memcachedScraper) addToIntMetric(metric pmetric.NumberDataPointSlice, attributes pcommon.Map, value int64, now pcommon.Timestamp) { dataPoint := metric.AppendEmpty() dataPoint.SetTimestamp(now) dataPoint.SetIntVal(value) diff --git a/receiver/mongodbatlasreceiver/go.mod b/receiver/mongodbatlasreceiver/go.mod index 76f4c0a4cd79..df6a74b93474 100644 --- a/receiver/mongodbatlasreceiver/go.mod +++ b/receiver/mongodbatlasreceiver/go.mod @@ -8,8 +8,7 @@ require ( github.com/mongodb-forks/digest v1.0.3 github.com/pkg/errors v0.9.1 go.mongodb.org/atlas v0.15.0 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d go.uber.org/zap v1.21.0 ) @@ -17,10 +16,9 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/fsnotify/fsnotify v1.5.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/protobuf v1.5.2 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/hashicorp/errwrap v1.0.0 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/pretty v0.3.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect @@ -28,23 +26,19 @@ require ( github.com/openlyinc/pointy v1.1.2 // indirect github.com/pelletier/go-toml v1.9.4 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect 
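In the memcachedreceiver scraper above, the split lands along a simple line: metric containers (Metrics, Metric, NumberDataPointSlice) come from pmetric, while attribute maps, values, and timestamps come from pcommon. A sketch of recording one int data point with the two packages (the metric name and attribute key/value are illustrative, and CopyTo is one way to attach attributes, not necessarily the receiver's):

import (
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"
)

// recordExamplePoint builds a gauge metric with a single attributed int data point.
func recordExamplePoint() pmetric.Metric {
	metric := pmetric.NewMetric()
	metric.SetName("example.commands")
	metric.SetDataType(pmetric.MetricDataTypeGauge)

	attrs := pcommon.NewMap()
	attrs.Insert("command", pcommon.NewValueString("get"))

	dp := metric.Gauge().DataPoints().AppendEmpty()
	dp.SetTimestamp(pcommon.NewTimestampFromTime(time.Now()))
	dp.SetIntVal(42)
	attrs.CopyTo(dp.Attributes()) // assumed available on pcommon.Map
	return metric
}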
go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect - golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect - golang.org/x/text v0.3.7 // indirect - google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect - google.golang.org/grpc v1.45.0 // indirect - google.golang.org/protobuf v1.28.0 // indirect gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) require ( github.com/stretchr/testify v1.7.1 + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/multierr v1.8.0 // indirect ) + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/receiver/mongodbatlasreceiver/go.sum b/receiver/mongodbatlasreceiver/go.sum index 5aff912ae7cc..4d5022327200 100644 --- a/receiver/mongodbatlasreceiver/go.sum +++ b/receiver/mongodbatlasreceiver/go.sum @@ -1,8 +1,5 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= @@ -21,16 +18,8 @@ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kB github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/creack/pty v1.1.9/go.mod 
h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -38,16 +27,12 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= @@ -66,18 +51,13 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= 
-github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -87,14 +67,12 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -126,8 +104,8 @@ github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= @@ -171,20 +149,15 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rhnvrm/simples3 v0.6.1/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.6.1 
h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= @@ -196,18 +169,17 @@ go.mongodb.org/atlas v0.15.0 h1:YyOBdBIuI//krRITf4r7PSirJ3YDNNUfNmapxwSyDow= go.mongodb.org/atlas v0.15.0/go.mod h1:lQhRHIxc6jQHEK3/q9WLu/SdBkPj2fQYhjLGUF6Z3U8= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.7.0/go.mod 
h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -231,20 +203,15 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -260,22 +227,17 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -296,22 +258,14 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod 
h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -321,11 +275,7 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -333,8 +283,6 @@ gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogR gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/receiver/mongodbatlasreceiver/internal/metadata/generated_metrics_v2.go b/receiver/mongodbatlasreceiver/internal/metadata/generated_metrics_v2.go index ad6396335861..1427367b84db 100644 --- a/receiver/mongodbatlasreceiver/internal/metadata/generated_metrics_v2.go +++ b/receiver/mongodbatlasreceiver/internal/metadata/generated_metrics_v2.go @@ -5,7 +5,8 @@ package metadata import ( "time" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" ) // MetricSettings provides common settings for a particular metric. @@ -275,7 +276,7 @@ func DefaultMetricsSettings() MetricsSettings { } type metricMongodbatlasDbCounts struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -285,11 +286,11 @@ func (m *metricMongodbatlasDbCounts) init() { m.data.SetName("mongodbatlas.db.counts") m.data.SetDescription("Database feature size") m.data.SetUnit("{objects}") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbatlasDbCounts) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, objectTypeAttributeValue string) { +func (m *metricMongodbatlasDbCounts) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, objectTypeAttributeValue string) { if !m.settings.Enabled { return } @@ -297,7 +298,7 @@ func (m *metricMongodbatlasDbCounts) recordDataPoint(start pdata.Timestamp, ts p dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.ObjectType, pdata.NewValueString(objectTypeAttributeValue)) + dp.Attributes().Insert(A.ObjectType, pcommon.NewValueString(objectTypeAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -308,7 +309,7 @@ func (m *metricMongodbatlasDbCounts) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbatlasDbCounts) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasDbCounts) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -319,14 +320,14 @@ func (m *metricMongodbatlasDbCounts) emit(metrics pdata.MetricSlice) { func newMetricMongodbatlasDbCounts(settings MetricSettings) metricMongodbatlasDbCounts { m := metricMongodbatlasDbCounts{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasDbSize struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -336,11 +337,11 @@ func (m *metricMongodbatlasDbSize) init() { m.data.SetName("mongodbatlas.db.size") m.data.SetDescription("Database feature size") m.data.SetUnit("By") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbatlasDbSize) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, objectTypeAttributeValue string) { +func (m *metricMongodbatlasDbSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, objectTypeAttributeValue string) { if !m.settings.Enabled { return } @@ -348,7 +349,7 @@ func (m *metricMongodbatlasDbSize) recordDataPoint(start pdata.Timestamp, ts pda dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.ObjectType, pdata.NewValueString(objectTypeAttributeValue)) + dp.Attributes().Insert(A.ObjectType, pcommon.NewValueString(objectTypeAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -359,7 +360,7 @@ func (m *metricMongodbatlasDbSize) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricMongodbatlasDbSize) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasDbSize) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -370,14 +371,14 @@ func (m *metricMongodbatlasDbSize) emit(metrics pdata.MetricSlice) { func newMetricMongodbatlasDbSize(settings MetricSettings) metricMongodbatlasDbSize { m := metricMongodbatlasDbSize{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasDiskPartitionIopsAverage struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -387,11 +388,11 @@ func (m *metricMongodbatlasDiskPartitionIopsAverage) init() { m.data.SetName("mongodbatlas.disk.partition.iops.average") m.data.SetDescription("Disk partition iops") m.data.SetUnit("{ops}/s") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbatlasDiskPartitionIopsAverage) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, diskDirectionAttributeValue string) { +func (m *metricMongodbatlasDiskPartitionIopsAverage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, diskDirectionAttributeValue string) { if !m.settings.Enabled { return } @@ -399,7 +400,7 @@ func (m *metricMongodbatlasDiskPartitionIopsAverage) recordDataPoint(start pdata dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.DiskDirection, pdata.NewValueString(diskDirectionAttributeValue)) + dp.Attributes().Insert(A.DiskDirection, pcommon.NewValueString(diskDirectionAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -410,7 +411,7 @@ func (m *metricMongodbatlasDiskPartitionIopsAverage) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbatlasDiskPartitionIopsAverage) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasDiskPartitionIopsAverage) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -421,14 +422,14 @@ func (m *metricMongodbatlasDiskPartitionIopsAverage) emit(metrics pdata.MetricSl func newMetricMongodbatlasDiskPartitionIopsAverage(settings MetricSettings) metricMongodbatlasDiskPartitionIopsAverage { m := metricMongodbatlasDiskPartitionIopsAverage{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasDiskPartitionIopsMax struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -438,11 +439,11 @@ func (m *metricMongodbatlasDiskPartitionIopsMax) init() { m.data.SetName("mongodbatlas.disk.partition.iops.max") m.data.SetDescription("Disk partition iops") m.data.SetUnit("{ops}/s") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbatlasDiskPartitionIopsMax) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, diskDirectionAttributeValue string) { +func (m *metricMongodbatlasDiskPartitionIopsMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, diskDirectionAttributeValue string) { if !m.settings.Enabled { return } @@ -450,7 +451,7 @@ func (m *metricMongodbatlasDiskPartitionIopsMax) recordDataPoint(start pdata.Tim dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.DiskDirection, pdata.NewValueString(diskDirectionAttributeValue)) + dp.Attributes().Insert(A.DiskDirection, pcommon.NewValueString(diskDirectionAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -461,7 +462,7 @@ func (m *metricMongodbatlasDiskPartitionIopsMax) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbatlasDiskPartitionIopsMax) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasDiskPartitionIopsMax) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -472,14 +473,14 @@ func (m *metricMongodbatlasDiskPartitionIopsMax) emit(metrics pdata.MetricSlice) func newMetricMongodbatlasDiskPartitionIopsMax(settings MetricSettings) metricMongodbatlasDiskPartitionIopsMax { m := metricMongodbatlasDiskPartitionIopsMax{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasDiskPartitionLatencyAverage struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -489,11 +490,11 @@ func (m *metricMongodbatlasDiskPartitionLatencyAverage) init() { m.data.SetName("mongodbatlas.disk.partition.latency.average") m.data.SetDescription("Disk partition latency") m.data.SetUnit("ms") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbatlasDiskPartitionLatencyAverage) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, diskDirectionAttributeValue string) { +func (m *metricMongodbatlasDiskPartitionLatencyAverage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, diskDirectionAttributeValue string) { if !m.settings.Enabled { return } @@ -501,7 +502,7 @@ func (m *metricMongodbatlasDiskPartitionLatencyAverage) recordDataPoint(start pd dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.DiskDirection, pdata.NewValueString(diskDirectionAttributeValue)) + dp.Attributes().Insert(A.DiskDirection, pcommon.NewValueString(diskDirectionAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -512,7 +513,7 @@ func (m *metricMongodbatlasDiskPartitionLatencyAverage) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbatlasDiskPartitionLatencyAverage) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasDiskPartitionLatencyAverage) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -523,14 +524,14 @@ func (m *metricMongodbatlasDiskPartitionLatencyAverage) emit(metrics pdata.Metri func newMetricMongodbatlasDiskPartitionLatencyAverage(settings MetricSettings) metricMongodbatlasDiskPartitionLatencyAverage { m := metricMongodbatlasDiskPartitionLatencyAverage{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasDiskPartitionLatencyMax struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -540,11 +541,11 @@ func (m *metricMongodbatlasDiskPartitionLatencyMax) init() { m.data.SetName("mongodbatlas.disk.partition.latency.max") m.data.SetDescription("Disk partition latency") m.data.SetUnit("ms") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbatlasDiskPartitionLatencyMax) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, diskDirectionAttributeValue string) { +func (m *metricMongodbatlasDiskPartitionLatencyMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, diskDirectionAttributeValue string) { if !m.settings.Enabled { return } @@ -552,7 +553,7 @@ func (m *metricMongodbatlasDiskPartitionLatencyMax) recordDataPoint(start pdata. 
dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.DiskDirection, pdata.NewValueString(diskDirectionAttributeValue)) + dp.Attributes().Insert(A.DiskDirection, pcommon.NewValueString(diskDirectionAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -563,7 +564,7 @@ func (m *metricMongodbatlasDiskPartitionLatencyMax) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbatlasDiskPartitionLatencyMax) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasDiskPartitionLatencyMax) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -574,14 +575,14 @@ func (m *metricMongodbatlasDiskPartitionLatencyMax) emit(metrics pdata.MetricSli func newMetricMongodbatlasDiskPartitionLatencyMax(settings MetricSettings) metricMongodbatlasDiskPartitionLatencyMax { m := metricMongodbatlasDiskPartitionLatencyMax{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasDiskPartitionSpaceAverage struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -591,11 +592,11 @@ func (m *metricMongodbatlasDiskPartitionSpaceAverage) init() { m.data.SetName("mongodbatlas.disk.partition.space.average") m.data.SetDescription("Disk partition space") m.data.SetUnit("By") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbatlasDiskPartitionSpaceAverage) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, diskStatusAttributeValue string) { +func (m *metricMongodbatlasDiskPartitionSpaceAverage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, diskStatusAttributeValue string) { if !m.settings.Enabled { return } @@ -603,7 +604,7 @@ func (m *metricMongodbatlasDiskPartitionSpaceAverage) recordDataPoint(start pdat dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.DiskStatus, pdata.NewValueString(diskStatusAttributeValue)) + dp.Attributes().Insert(A.DiskStatus, pcommon.NewValueString(diskStatusAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -614,7 +615,7 @@ func (m *metricMongodbatlasDiskPartitionSpaceAverage) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricMongodbatlasDiskPartitionSpaceAverage) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasDiskPartitionSpaceAverage) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -625,14 +626,14 @@ func (m *metricMongodbatlasDiskPartitionSpaceAverage) emit(metrics pdata.MetricS func newMetricMongodbatlasDiskPartitionSpaceAverage(settings MetricSettings) metricMongodbatlasDiskPartitionSpaceAverage { m := metricMongodbatlasDiskPartitionSpaceAverage{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasDiskPartitionSpaceMax struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -642,11 +643,11 @@ func (m *metricMongodbatlasDiskPartitionSpaceMax) init() { m.data.SetName("mongodbatlas.disk.partition.space.max") m.data.SetDescription("Disk partition space") m.data.SetUnit("By") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbatlasDiskPartitionSpaceMax) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, diskStatusAttributeValue string) { +func (m *metricMongodbatlasDiskPartitionSpaceMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, diskStatusAttributeValue string) { if !m.settings.Enabled { return } @@ -654,7 +655,7 @@ func (m *metricMongodbatlasDiskPartitionSpaceMax) recordDataPoint(start pdata.Ti dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.DiskStatus, pdata.NewValueString(diskStatusAttributeValue)) + dp.Attributes().Insert(A.DiskStatus, pcommon.NewValueString(diskStatusAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -665,7 +666,7 @@ func (m *metricMongodbatlasDiskPartitionSpaceMax) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbatlasDiskPartitionSpaceMax) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasDiskPartitionSpaceMax) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -676,14 +677,14 @@ func (m *metricMongodbatlasDiskPartitionSpaceMax) emit(metrics pdata.MetricSlice func newMetricMongodbatlasDiskPartitionSpaceMax(settings MetricSettings) metricMongodbatlasDiskPartitionSpaceMax { m := metricMongodbatlasDiskPartitionSpaceMax{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasDiskPartitionUsageAverage struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -693,11 +694,11 @@ func (m *metricMongodbatlasDiskPartitionUsageAverage) init() { m.data.SetName("mongodbatlas.disk.partition.usage.average") m.data.SetDescription("Disk partition usage (%)") m.data.SetUnit("1") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbatlasDiskPartitionUsageAverage) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, diskStatusAttributeValue string) { +func (m *metricMongodbatlasDiskPartitionUsageAverage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, diskStatusAttributeValue string) { if !m.settings.Enabled { return } @@ -705,7 +706,7 @@ func (m *metricMongodbatlasDiskPartitionUsageAverage) recordDataPoint(start pdat dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.DiskStatus, pdata.NewValueString(diskStatusAttributeValue)) + dp.Attributes().Insert(A.DiskStatus, pcommon.NewValueString(diskStatusAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -716,7 +717,7 @@ func (m *metricMongodbatlasDiskPartitionUsageAverage) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbatlasDiskPartitionUsageAverage) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasDiskPartitionUsageAverage) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -727,14 +728,14 @@ func (m *metricMongodbatlasDiskPartitionUsageAverage) emit(metrics pdata.MetricS func newMetricMongodbatlasDiskPartitionUsageAverage(settings MetricSettings) metricMongodbatlasDiskPartitionUsageAverage { m := metricMongodbatlasDiskPartitionUsageAverage{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasDiskPartitionUsageMax struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -744,11 +745,11 @@ func (m *metricMongodbatlasDiskPartitionUsageMax) init() { m.data.SetName("mongodbatlas.disk.partition.usage.max") m.data.SetDescription("Disk partition usage (%)") m.data.SetUnit("1") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbatlasDiskPartitionUsageMax) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, diskStatusAttributeValue string) { +func (m *metricMongodbatlasDiskPartitionUsageMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, diskStatusAttributeValue string) { if !m.settings.Enabled { return } @@ -756,7 +757,7 @@ func (m *metricMongodbatlasDiskPartitionUsageMax) recordDataPoint(start pdata.Ti dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.DiskStatus, pdata.NewValueString(diskStatusAttributeValue)) + dp.Attributes().Insert(A.DiskStatus, pcommon.NewValueString(diskStatusAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -767,7 +768,7 @@ func (m *metricMongodbatlasDiskPartitionUsageMax) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbatlasDiskPartitionUsageMax) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasDiskPartitionUsageMax) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -778,14 +779,14 @@ func (m *metricMongodbatlasDiskPartitionUsageMax) emit(metrics pdata.MetricSlice func newMetricMongodbatlasDiskPartitionUsageMax(settings MetricSettings) metricMongodbatlasDiskPartitionUsageMax { m := metricMongodbatlasDiskPartitionUsageMax{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasDiskPartitionUtilizationAverage struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -795,11 +796,11 @@ func (m *metricMongodbatlasDiskPartitionUtilizationAverage) init() { m.data.SetName("mongodbatlas.disk.partition.utilization.average") m.data.SetDescription("Disk partition utilization (%)") m.data.SetUnit("1") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbatlasDiskPartitionUtilizationAverage) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, diskStatusAttributeValue string) { +func (m *metricMongodbatlasDiskPartitionUtilizationAverage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, diskStatusAttributeValue string) { if !m.settings.Enabled { return } @@ -807,7 +808,7 @@ func (m *metricMongodbatlasDiskPartitionUtilizationAverage) recordDataPoint(star dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.DiskStatus, pdata.NewValueString(diskStatusAttributeValue)) + dp.Attributes().Insert(A.DiskStatus, pcommon.NewValueString(diskStatusAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -818,7 +819,7 @@ func (m *metricMongodbatlasDiskPartitionUtilizationAverage) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbatlasDiskPartitionUtilizationAverage) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasDiskPartitionUtilizationAverage) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -829,14 +830,14 @@ func (m *metricMongodbatlasDiskPartitionUtilizationAverage) emit(metrics pdata.M func newMetricMongodbatlasDiskPartitionUtilizationAverage(settings MetricSettings) metricMongodbatlasDiskPartitionUtilizationAverage { m := metricMongodbatlasDiskPartitionUtilizationAverage{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasDiskPartitionUtilizationMax struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -846,11 +847,11 @@ func (m *metricMongodbatlasDiskPartitionUtilizationMax) init() { m.data.SetName("mongodbatlas.disk.partition.utilization.max") m.data.SetDescription("Disk partition utilization (%)") m.data.SetUnit("1") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbatlasDiskPartitionUtilizationMax) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, diskStatusAttributeValue string) { +func (m *metricMongodbatlasDiskPartitionUtilizationMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, diskStatusAttributeValue string) { if !m.settings.Enabled { return } @@ -858,7 +859,7 @@ func (m *metricMongodbatlasDiskPartitionUtilizationMax) recordDataPoint(start pd dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.DiskStatus, pdata.NewValueString(diskStatusAttributeValue)) + dp.Attributes().Insert(A.DiskStatus, pcommon.NewValueString(diskStatusAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -869,7 +870,7 @@ func (m *metricMongodbatlasDiskPartitionUtilizationMax) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbatlasDiskPartitionUtilizationMax) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasDiskPartitionUtilizationMax) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -880,14 +881,14 @@ func (m *metricMongodbatlasDiskPartitionUtilizationMax) emit(metrics pdata.Metri func newMetricMongodbatlasDiskPartitionUtilizationMax(settings MetricSettings) metricMongodbatlasDiskPartitionUtilizationMax { m := metricMongodbatlasDiskPartitionUtilizationMax{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasProcessAsserts struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -897,11 +898,11 @@ func (m *metricMongodbatlasProcessAsserts) init() { m.data.SetName("mongodbatlas.process.asserts") m.data.SetDescription("Number of assertions per second") m.data.SetUnit("{assertions}/s") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbatlasProcessAsserts) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, assertTypeAttributeValue string) { +func (m *metricMongodbatlasProcessAsserts) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, assertTypeAttributeValue string) { if !m.settings.Enabled { return } @@ -909,7 +910,7 @@ func (m *metricMongodbatlasProcessAsserts) recordDataPoint(start pdata.Timestamp dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.AssertType, pdata.NewValueString(assertTypeAttributeValue)) + dp.Attributes().Insert(A.AssertType, pcommon.NewValueString(assertTypeAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -920,7 +921,7 @@ func (m *metricMongodbatlasProcessAsserts) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbatlasProcessAsserts) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasProcessAsserts) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -931,14 +932,14 @@ func (m *metricMongodbatlasProcessAsserts) emit(metrics pdata.MetricSlice) { func newMetricMongodbatlasProcessAsserts(settings MetricSettings) metricMongodbatlasProcessAsserts { m := metricMongodbatlasProcessAsserts{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasProcessBackgroundFlush struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -948,10 +949,10 @@ func (m *metricMongodbatlasProcessBackgroundFlush) init() { m.data.SetName("mongodbatlas.process.background_flush") m.data.SetDescription("Amount of data flushed in the background") m.data.SetUnit("1") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) } -func (m *metricMongodbatlasProcessBackgroundFlush) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64) { +func (m *metricMongodbatlasProcessBackgroundFlush) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { if !m.settings.Enabled { return } @@ -969,7 +970,7 @@ func (m *metricMongodbatlasProcessBackgroundFlush) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricMongodbatlasProcessBackgroundFlush) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasProcessBackgroundFlush) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -980,14 +981,14 @@ func (m *metricMongodbatlasProcessBackgroundFlush) emit(metrics pdata.MetricSlic func newMetricMongodbatlasProcessBackgroundFlush(settings MetricSettings) metricMongodbatlasProcessBackgroundFlush { m := metricMongodbatlasProcessBackgroundFlush{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasProcessCacheIo struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -997,11 +998,11 @@ func (m *metricMongodbatlasProcessCacheIo) init() { m.data.SetName("mongodbatlas.process.cache.io") m.data.SetDescription("Cache throughput (per second)") m.data.SetUnit("By") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbatlasProcessCacheIo) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, cacheDirectionAttributeValue string) { +func (m *metricMongodbatlasProcessCacheIo) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, cacheDirectionAttributeValue string) { if !m.settings.Enabled { return } @@ -1009,7 +1010,7 @@ func (m *metricMongodbatlasProcessCacheIo) recordDataPoint(start pdata.Timestamp dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.CacheDirection, pdata.NewValueString(cacheDirectionAttributeValue)) + dp.Attributes().Insert(A.CacheDirection, pcommon.NewValueString(cacheDirectionAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -1020,7 +1021,7 @@ func (m *metricMongodbatlasProcessCacheIo) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbatlasProcessCacheIo) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasProcessCacheIo) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -1031,14 +1032,14 @@ func (m *metricMongodbatlasProcessCacheIo) emit(metrics pdata.MetricSlice) { func newMetricMongodbatlasProcessCacheIo(settings MetricSettings) metricMongodbatlasProcessCacheIo { m := metricMongodbatlasProcessCacheIo{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasProcessCacheSize struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -1048,13 +1049,13 @@ func (m *metricMongodbatlasProcessCacheSize) init() { m.data.SetName("mongodbatlas.process.cache.size") m.data.SetDescription("Cache sizes") m.data.SetUnit("By") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbatlasProcessCacheSize) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, cacheStatusAttributeValue string) { +func (m *metricMongodbatlasProcessCacheSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, cacheStatusAttributeValue string) { if !m.settings.Enabled { return } @@ -1062,7 +1063,7 @@ func (m *metricMongodbatlasProcessCacheSize) recordDataPoint(start pdata.Timesta dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.CacheStatus, pdata.NewValueString(cacheStatusAttributeValue)) + dp.Attributes().Insert(A.CacheStatus, pcommon.NewValueString(cacheStatusAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -1073,7 +1074,7 @@ func (m *metricMongodbatlasProcessCacheSize) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbatlasProcessCacheSize) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasProcessCacheSize) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -1084,14 +1085,14 @@ func (m *metricMongodbatlasProcessCacheSize) emit(metrics pdata.MetricSlice) { func newMetricMongodbatlasProcessCacheSize(settings MetricSettings) metricMongodbatlasProcessCacheSize { m := metricMongodbatlasProcessCacheSize{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasProcessConnections struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -1101,12 +1102,12 @@ func (m *metricMongodbatlasProcessConnections) init() { m.data.SetName("mongodbatlas.process.connections") m.data.SetDescription("Number of current connections") m.data.SetUnit("{connections}") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) } -func (m *metricMongodbatlasProcessConnections) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64) { +func (m *metricMongodbatlasProcessConnections) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { if !m.settings.Enabled { return } @@ -1124,7 +1125,7 @@ func (m *metricMongodbatlasProcessConnections) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricMongodbatlasProcessConnections) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasProcessConnections) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -1135,14 +1136,14 @@ func (m *metricMongodbatlasProcessConnections) emit(metrics pdata.MetricSlice) { func newMetricMongodbatlasProcessConnections(settings MetricSettings) metricMongodbatlasProcessConnections { m := metricMongodbatlasProcessConnections{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -1152,11 +1153,11 @@ func (m *metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage) init() { m.data.SetName("mongodbatlas.process.cpu.children.normalized.usage.average") m.data.SetDescription("CPU Usage for child processes, normalized to pct") m.data.SetUnit("1") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, cpuStateAttributeValue string) { +func (m *metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) { if !m.settings.Enabled { return } @@ -1164,7 +1165,7 @@ func (m *metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage) recordDataP dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.CPUState, pdata.NewValueString(cpuStateAttributeValue)) + dp.Attributes().Insert(A.CPUState, pcommon.NewValueString(cpuStateAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -1175,7 +1176,7 @@ func (m *metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage) updateCapac } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -1186,14 +1187,14 @@ func (m *metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage) emit(metric func newMetricMongodbatlasProcessCPUChildrenNormalizedUsageAverage(settings MetricSettings) metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage { m := metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasProcessCPUChildrenNormalizedUsageMax struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. 
capacity int // max observed number of data points added to the metric. } @@ -1203,11 +1204,11 @@ func (m *metricMongodbatlasProcessCPUChildrenNormalizedUsageMax) init() { m.data.SetName("mongodbatlas.process.cpu.children.normalized.usage.max") m.data.SetDescription("CPU Usage for child processes, normalized to pct") m.data.SetUnit("1") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbatlasProcessCPUChildrenNormalizedUsageMax) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, cpuStateAttributeValue string) { +func (m *metricMongodbatlasProcessCPUChildrenNormalizedUsageMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) { if !m.settings.Enabled { return } @@ -1215,7 +1216,7 @@ func (m *metricMongodbatlasProcessCPUChildrenNormalizedUsageMax) recordDataPoint dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.CPUState, pdata.NewValueString(cpuStateAttributeValue)) + dp.Attributes().Insert(A.CPUState, pcommon.NewValueString(cpuStateAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -1226,7 +1227,7 @@ func (m *metricMongodbatlasProcessCPUChildrenNormalizedUsageMax) updateCapacity( } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbatlasProcessCPUChildrenNormalizedUsageMax) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasProcessCPUChildrenNormalizedUsageMax) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -1237,14 +1238,14 @@ func (m *metricMongodbatlasProcessCPUChildrenNormalizedUsageMax) emit(metrics pd func newMetricMongodbatlasProcessCPUChildrenNormalizedUsageMax(settings MetricSettings) metricMongodbatlasProcessCPUChildrenNormalizedUsageMax { m := metricMongodbatlasProcessCPUChildrenNormalizedUsageMax{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasProcessCPUChildrenUsageAverage struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -1254,11 +1255,11 @@ func (m *metricMongodbatlasProcessCPUChildrenUsageAverage) init() { m.data.SetName("mongodbatlas.process.cpu.children.usage.average") m.data.SetDescription("CPU Usage for child processes (%)") m.data.SetUnit("1") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbatlasProcessCPUChildrenUsageAverage) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, cpuStateAttributeValue string) { +func (m *metricMongodbatlasProcessCPUChildrenUsageAverage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) { if !m.settings.Enabled { return } @@ -1266,7 +1267,7 @@ func (m *metricMongodbatlasProcessCPUChildrenUsageAverage) recordDataPoint(start dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.CPUState, pdata.NewValueString(cpuStateAttributeValue)) + dp.Attributes().Insert(A.CPUState, pcommon.NewValueString(cpuStateAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -1277,7 +1278,7 @@ func (m *metricMongodbatlasProcessCPUChildrenUsageAverage) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbatlasProcessCPUChildrenUsageAverage) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasProcessCPUChildrenUsageAverage) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -1288,14 +1289,14 @@ func (m *metricMongodbatlasProcessCPUChildrenUsageAverage) emit(metrics pdata.Me func newMetricMongodbatlasProcessCPUChildrenUsageAverage(settings MetricSettings) metricMongodbatlasProcessCPUChildrenUsageAverage { m := metricMongodbatlasProcessCPUChildrenUsageAverage{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasProcessCPUChildrenUsageMax struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -1305,11 +1306,11 @@ func (m *metricMongodbatlasProcessCPUChildrenUsageMax) init() { m.data.SetName("mongodbatlas.process.cpu.children.usage.max") m.data.SetDescription("CPU Usage for child processes (%)") m.data.SetUnit("1") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbatlasProcessCPUChildrenUsageMax) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, cpuStateAttributeValue string) { +func (m *metricMongodbatlasProcessCPUChildrenUsageMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) { if !m.settings.Enabled { return } @@ -1317,7 +1318,7 @@ func (m *metricMongodbatlasProcessCPUChildrenUsageMax) recordDataPoint(start pda dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.CPUState, pdata.NewValueString(cpuStateAttributeValue)) + dp.Attributes().Insert(A.CPUState, pcommon.NewValueString(cpuStateAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -1328,7 +1329,7 @@ func (m *metricMongodbatlasProcessCPUChildrenUsageMax) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbatlasProcessCPUChildrenUsageMax) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasProcessCPUChildrenUsageMax) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -1339,14 +1340,14 @@ func (m *metricMongodbatlasProcessCPUChildrenUsageMax) emit(metrics pdata.Metric func newMetricMongodbatlasProcessCPUChildrenUsageMax(settings MetricSettings) metricMongodbatlasProcessCPUChildrenUsageMax { m := metricMongodbatlasProcessCPUChildrenUsageMax{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasProcessCPUNormalizedUsageAverage struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -1356,11 +1357,11 @@ func (m *metricMongodbatlasProcessCPUNormalizedUsageAverage) init() { m.data.SetName("mongodbatlas.process.cpu.normalized.usage.average") m.data.SetDescription("CPU Usage, normalized to pct") m.data.SetUnit("1") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbatlasProcessCPUNormalizedUsageAverage) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, cpuStateAttributeValue string) { +func (m *metricMongodbatlasProcessCPUNormalizedUsageAverage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) { if !m.settings.Enabled { return } @@ -1368,7 +1369,7 @@ func (m *metricMongodbatlasProcessCPUNormalizedUsageAverage) recordDataPoint(sta dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.CPUState, pdata.NewValueString(cpuStateAttributeValue)) + dp.Attributes().Insert(A.CPUState, pcommon.NewValueString(cpuStateAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -1379,7 +1380,7 @@ func (m *metricMongodbatlasProcessCPUNormalizedUsageAverage) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbatlasProcessCPUNormalizedUsageAverage) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasProcessCPUNormalizedUsageAverage) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -1390,14 +1391,14 @@ func (m *metricMongodbatlasProcessCPUNormalizedUsageAverage) emit(metrics pdata. func newMetricMongodbatlasProcessCPUNormalizedUsageAverage(settings MetricSettings) metricMongodbatlasProcessCPUNormalizedUsageAverage { m := metricMongodbatlasProcessCPUNormalizedUsageAverage{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasProcessCPUNormalizedUsageMax struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -1407,11 +1408,11 @@ func (m *metricMongodbatlasProcessCPUNormalizedUsageMax) init() { m.data.SetName("mongodbatlas.process.cpu.normalized.usage.max") m.data.SetDescription("CPU Usage, normalized to pct") m.data.SetUnit("1") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbatlasProcessCPUNormalizedUsageMax) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, cpuStateAttributeValue string) { +func (m *metricMongodbatlasProcessCPUNormalizedUsageMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) { if !m.settings.Enabled { return } @@ -1419,7 +1420,7 @@ func (m *metricMongodbatlasProcessCPUNormalizedUsageMax) recordDataPoint(start p dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.CPUState, pdata.NewValueString(cpuStateAttributeValue)) + dp.Attributes().Insert(A.CPUState, pcommon.NewValueString(cpuStateAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -1430,7 +1431,7 @@ func (m *metricMongodbatlasProcessCPUNormalizedUsageMax) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbatlasProcessCPUNormalizedUsageMax) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasProcessCPUNormalizedUsageMax) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -1441,14 +1442,14 @@ func (m *metricMongodbatlasProcessCPUNormalizedUsageMax) emit(metrics pdata.Metr func newMetricMongodbatlasProcessCPUNormalizedUsageMax(settings MetricSettings) metricMongodbatlasProcessCPUNormalizedUsageMax { m := metricMongodbatlasProcessCPUNormalizedUsageMax{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasProcessCPUUsageAverage struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -1458,11 +1459,11 @@ func (m *metricMongodbatlasProcessCPUUsageAverage) init() { m.data.SetName("mongodbatlas.process.cpu.usage.average") m.data.SetDescription("CPU Usage (%)") m.data.SetUnit("1") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbatlasProcessCPUUsageAverage) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, cpuStateAttributeValue string) { +func (m *metricMongodbatlasProcessCPUUsageAverage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) { if !m.settings.Enabled { return } @@ -1470,7 +1471,7 @@ func (m *metricMongodbatlasProcessCPUUsageAverage) recordDataPoint(start pdata.T dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.CPUState, pdata.NewValueString(cpuStateAttributeValue)) + dp.Attributes().Insert(A.CPUState, pcommon.NewValueString(cpuStateAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -1481,7 +1482,7 @@ func (m *metricMongodbatlasProcessCPUUsageAverage) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbatlasProcessCPUUsageAverage) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasProcessCPUUsageAverage) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -1492,14 +1493,14 @@ func (m *metricMongodbatlasProcessCPUUsageAverage) emit(metrics pdata.MetricSlic func newMetricMongodbatlasProcessCPUUsageAverage(settings MetricSettings) metricMongodbatlasProcessCPUUsageAverage { m := metricMongodbatlasProcessCPUUsageAverage{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasProcessCPUUsageMax struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -1509,11 +1510,11 @@ func (m *metricMongodbatlasProcessCPUUsageMax) init() { m.data.SetName("mongodbatlas.process.cpu.usage.max") m.data.SetDescription("CPU Usage (%)") m.data.SetUnit("1") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbatlasProcessCPUUsageMax) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, cpuStateAttributeValue string) { +func (m *metricMongodbatlasProcessCPUUsageMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) { if !m.settings.Enabled { return } @@ -1521,7 +1522,7 @@ func (m *metricMongodbatlasProcessCPUUsageMax) recordDataPoint(start pdata.Times dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.CPUState, pdata.NewValueString(cpuStateAttributeValue)) + dp.Attributes().Insert(A.CPUState, pcommon.NewValueString(cpuStateAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. 
@@ -1532,7 +1533,7 @@ func (m *metricMongodbatlasProcessCPUUsageMax) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbatlasProcessCPUUsageMax) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasProcessCPUUsageMax) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -1543,14 +1544,14 @@ func (m *metricMongodbatlasProcessCPUUsageMax) emit(metrics pdata.MetricSlice) { func newMetricMongodbatlasProcessCPUUsageMax(settings MetricSettings) metricMongodbatlasProcessCPUUsageMax { m := metricMongodbatlasProcessCPUUsageMax{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasProcessCursors struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -1560,11 +1561,11 @@ func (m *metricMongodbatlasProcessCursors) init() { m.data.SetName("mongodbatlas.process.cursors") m.data.SetDescription("Number of cursors") m.data.SetUnit("{cursors}") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbatlasProcessCursors) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, cursorStateAttributeValue string) { +func (m *metricMongodbatlasProcessCursors) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, cursorStateAttributeValue string) { if !m.settings.Enabled { return } @@ -1572,7 +1573,7 @@ func (m *metricMongodbatlasProcessCursors) recordDataPoint(start pdata.Timestamp dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.CursorState, pdata.NewValueString(cursorStateAttributeValue)) + dp.Attributes().Insert(A.CursorState, pcommon.NewValueString(cursorStateAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -1583,7 +1584,7 @@ func (m *metricMongodbatlasProcessCursors) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbatlasProcessCursors) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasProcessCursors) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -1594,14 +1595,14 @@ func (m *metricMongodbatlasProcessCursors) emit(metrics pdata.MetricSlice) { func newMetricMongodbatlasProcessCursors(settings MetricSettings) metricMongodbatlasProcessCursors { m := metricMongodbatlasProcessCursors{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasProcessDbDocumentRate struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -1611,11 +1612,11 @@ func (m *metricMongodbatlasProcessDbDocumentRate) init() { m.data.SetName("mongodbatlas.process.db.document.rate") m.data.SetDescription("Document access rates") m.data.SetUnit("{documents}/s") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbatlasProcessDbDocumentRate) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, documentStatusAttributeValue string) { +func (m *metricMongodbatlasProcessDbDocumentRate) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, documentStatusAttributeValue string) { if !m.settings.Enabled { return } @@ -1623,7 +1624,7 @@ func (m *metricMongodbatlasProcessDbDocumentRate) recordDataPoint(start pdata.Ti dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.DocumentStatus, pdata.NewValueString(documentStatusAttributeValue)) + dp.Attributes().Insert(A.DocumentStatus, pcommon.NewValueString(documentStatusAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -1634,7 +1635,7 @@ func (m *metricMongodbatlasProcessDbDocumentRate) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbatlasProcessDbDocumentRate) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasProcessDbDocumentRate) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -1645,14 +1646,14 @@ func (m *metricMongodbatlasProcessDbDocumentRate) emit(metrics pdata.MetricSlice func newMetricMongodbatlasProcessDbDocumentRate(settings MetricSettings) metricMongodbatlasProcessDbDocumentRate { m := metricMongodbatlasProcessDbDocumentRate{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasProcessDbOperationsRate struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -1662,11 +1663,11 @@ func (m *metricMongodbatlasProcessDbOperationsRate) init() { m.data.SetName("mongodbatlas.process.db.operations.rate") m.data.SetDescription("DB Operation Rates") m.data.SetUnit("{operations}/s") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbatlasProcessDbOperationsRate) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, operationAttributeValue string, clusterRoleAttributeValue string) { +func (m *metricMongodbatlasProcessDbOperationsRate) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, operationAttributeValue string, clusterRoleAttributeValue string) { if !m.settings.Enabled { return } @@ -1674,8 +1675,8 @@ func (m *metricMongodbatlasProcessDbOperationsRate) recordDataPoint(start pdata. 
dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.Operation, pdata.NewValueString(operationAttributeValue)) - dp.Attributes().Insert(A.ClusterRole, pdata.NewValueString(clusterRoleAttributeValue)) + dp.Attributes().Insert(A.Operation, pcommon.NewValueString(operationAttributeValue)) + dp.Attributes().Insert(A.ClusterRole, pcommon.NewValueString(clusterRoleAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -1686,7 +1687,7 @@ func (m *metricMongodbatlasProcessDbOperationsRate) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbatlasProcessDbOperationsRate) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasProcessDbOperationsRate) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -1697,14 +1698,14 @@ func (m *metricMongodbatlasProcessDbOperationsRate) emit(metrics pdata.MetricSli func newMetricMongodbatlasProcessDbOperationsRate(settings MetricSettings) metricMongodbatlasProcessDbOperationsRate { m := metricMongodbatlasProcessDbOperationsRate{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasProcessDbOperationsTime struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -1714,13 +1715,13 @@ func (m *metricMongodbatlasProcessDbOperationsTime) init() { m.data.SetName("mongodbatlas.process.db.operations.time") m.data.SetDescription("DB Operation Times") m.data.SetUnit("ms") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbatlasProcessDbOperationsTime) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, executionTypeAttributeValue string) { +func (m *metricMongodbatlasProcessDbOperationsTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, executionTypeAttributeValue string) { if !m.settings.Enabled { return } @@ -1728,7 +1729,7 @@ func (m *metricMongodbatlasProcessDbOperationsTime) recordDataPoint(start pdata. dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.ExecutionType, pdata.NewValueString(executionTypeAttributeValue)) + dp.Attributes().Insert(A.ExecutionType, pcommon.NewValueString(executionTypeAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -1739,7 +1740,7 @@ func (m *metricMongodbatlasProcessDbOperationsTime) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricMongodbatlasProcessDbOperationsTime) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasProcessDbOperationsTime) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -1750,14 +1751,14 @@ func (m *metricMongodbatlasProcessDbOperationsTime) emit(metrics pdata.MetricSli func newMetricMongodbatlasProcessDbOperationsTime(settings MetricSettings) metricMongodbatlasProcessDbOperationsTime { m := metricMongodbatlasProcessDbOperationsTime{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasProcessDbQueryExecutorScanned struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -1767,11 +1768,11 @@ func (m *metricMongodbatlasProcessDbQueryExecutorScanned) init() { m.data.SetName("mongodbatlas.process.db.query_executor.scanned") m.data.SetDescription("Scanned objects") m.data.SetUnit("{objects}/s") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbatlasProcessDbQueryExecutorScanned) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, scannedTypeAttributeValue string) { +func (m *metricMongodbatlasProcessDbQueryExecutorScanned) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, scannedTypeAttributeValue string) { if !m.settings.Enabled { return } @@ -1779,7 +1780,7 @@ func (m *metricMongodbatlasProcessDbQueryExecutorScanned) recordDataPoint(start dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.ScannedType, pdata.NewValueString(scannedTypeAttributeValue)) + dp.Attributes().Insert(A.ScannedType, pcommon.NewValueString(scannedTypeAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -1790,7 +1791,7 @@ func (m *metricMongodbatlasProcessDbQueryExecutorScanned) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbatlasProcessDbQueryExecutorScanned) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasProcessDbQueryExecutorScanned) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -1801,14 +1802,14 @@ func (m *metricMongodbatlasProcessDbQueryExecutorScanned) emit(metrics pdata.Met func newMetricMongodbatlasProcessDbQueryExecutorScanned(settings MetricSettings) metricMongodbatlasProcessDbQueryExecutorScanned { m := metricMongodbatlasProcessDbQueryExecutorScanned{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasProcessDbQueryTargetingScannedPerReturned struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -1818,11 +1819,11 @@ func (m *metricMongodbatlasProcessDbQueryTargetingScannedPerReturned) init() { m.data.SetName("mongodbatlas.process.db.query_targeting.scanned_per_returned") m.data.SetDescription("Scanned objects per returned") m.data.SetUnit("{scanned}/{returned}") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbatlasProcessDbQueryTargetingScannedPerReturned) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, scannedTypeAttributeValue string) { +func (m *metricMongodbatlasProcessDbQueryTargetingScannedPerReturned) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, scannedTypeAttributeValue string) { if !m.settings.Enabled { return } @@ -1830,7 +1831,7 @@ func (m *metricMongodbatlasProcessDbQueryTargetingScannedPerReturned) recordData dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.ScannedType, pdata.NewValueString(scannedTypeAttributeValue)) + dp.Attributes().Insert(A.ScannedType, pcommon.NewValueString(scannedTypeAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -1841,7 +1842,7 @@ func (m *metricMongodbatlasProcessDbQueryTargetingScannedPerReturned) updateCapa } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbatlasProcessDbQueryTargetingScannedPerReturned) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasProcessDbQueryTargetingScannedPerReturned) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -1852,14 +1853,14 @@ func (m *metricMongodbatlasProcessDbQueryTargetingScannedPerReturned) emit(metri func newMetricMongodbatlasProcessDbQueryTargetingScannedPerReturned(settings MetricSettings) metricMongodbatlasProcessDbQueryTargetingScannedPerReturned { m := metricMongodbatlasProcessDbQueryTargetingScannedPerReturned{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasProcessDbStorage struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -1869,11 +1870,11 @@ func (m *metricMongodbatlasProcessDbStorage) init() { m.data.SetName("mongodbatlas.process.db.storage") m.data.SetDescription("Storage used by the database") m.data.SetUnit("By") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbatlasProcessDbStorage) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, storageStatusAttributeValue string) { +func (m *metricMongodbatlasProcessDbStorage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, storageStatusAttributeValue string) { if !m.settings.Enabled { return } @@ -1881,7 +1882,7 @@ func (m *metricMongodbatlasProcessDbStorage) recordDataPoint(start pdata.Timesta dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.StorageStatus, pdata.NewValueString(storageStatusAttributeValue)) + dp.Attributes().Insert(A.StorageStatus, pcommon.NewValueString(storageStatusAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -1892,7 +1893,7 @@ func (m *metricMongodbatlasProcessDbStorage) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbatlasProcessDbStorage) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasProcessDbStorage) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -1903,14 +1904,14 @@ func (m *metricMongodbatlasProcessDbStorage) emit(metrics pdata.MetricSlice) { func newMetricMongodbatlasProcessDbStorage(settings MetricSettings) metricMongodbatlasProcessDbStorage { m := metricMongodbatlasProcessDbStorage{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasProcessFtsCPUUsage struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -1920,11 +1921,11 @@ func (m *metricMongodbatlasProcessFtsCPUUsage) init() { m.data.SetName("mongodbatlas.process.fts.cpu.usage") m.data.SetDescription("Full text search CPU (%)") m.data.SetUnit("1") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbatlasProcessFtsCPUUsage) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, cpuStateAttributeValue string) { +func (m *metricMongodbatlasProcessFtsCPUUsage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) { if !m.settings.Enabled { return } @@ -1932,7 +1933,7 @@ func (m *metricMongodbatlasProcessFtsCPUUsage) recordDataPoint(start pdata.Times dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.CPUState, pdata.NewValueString(cpuStateAttributeValue)) + dp.Attributes().Insert(A.CPUState, pcommon.NewValueString(cpuStateAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. 
@@ -1943,7 +1944,7 @@ func (m *metricMongodbatlasProcessFtsCPUUsage) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbatlasProcessFtsCPUUsage) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasProcessFtsCPUUsage) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -1954,14 +1955,14 @@ func (m *metricMongodbatlasProcessFtsCPUUsage) emit(metrics pdata.MetricSlice) { func newMetricMongodbatlasProcessFtsCPUUsage(settings MetricSettings) metricMongodbatlasProcessFtsCPUUsage { m := metricMongodbatlasProcessFtsCPUUsage{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasProcessGlobalLock struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -1971,11 +1972,11 @@ func (m *metricMongodbatlasProcessGlobalLock) init() { m.data.SetName("mongodbatlas.process.global_lock") m.data.SetDescription("Number and status of locks") m.data.SetUnit("{locks}") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbatlasProcessGlobalLock) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, globalLockStateAttributeValue string) { +func (m *metricMongodbatlasProcessGlobalLock) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, globalLockStateAttributeValue string) { if !m.settings.Enabled { return } @@ -1983,7 +1984,7 @@ func (m *metricMongodbatlasProcessGlobalLock) recordDataPoint(start pdata.Timest dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.GlobalLockState, pdata.NewValueString(globalLockStateAttributeValue)) + dp.Attributes().Insert(A.GlobalLockState, pcommon.NewValueString(globalLockStateAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -1994,7 +1995,7 @@ func (m *metricMongodbatlasProcessGlobalLock) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbatlasProcessGlobalLock) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasProcessGlobalLock) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -2005,14 +2006,14 @@ func (m *metricMongodbatlasProcessGlobalLock) emit(metrics pdata.MetricSlice) { func newMetricMongodbatlasProcessGlobalLock(settings MetricSettings) metricMongodbatlasProcessGlobalLock { m := metricMongodbatlasProcessGlobalLock{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasProcessIndexBtreeMissRatio struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
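The repeated `emit(metrics pmetric.MetricSlice)` methods all follow the same lifecycle: if anything was recorded, the buffered metric is moved into the caller's slice and the buffer is reset. A rough sketch of that flow, assuming the documented semantics that `MoveTo` transfers the data and resets the source (the generated code then calls `init()` again):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	buf := pmetric.NewMetric()
	buf.SetName("mongodbatlas.process.global_lock")
	buf.SetDataType(pmetric.MetricDataTypeGauge)
	buf.Gauge().DataPoints().AppendEmpty().SetDoubleVal(1)

	out := pmetric.NewMetricSlice()
	if buf.Gauge().DataPoints().Len() > 0 {
		// Move the accumulated metric into the output slice; buf is reset by MoveTo,
		// which is why the generated emit() re-runs init() afterwards.
		buf.MoveTo(out.AppendEmpty())
	}
	fmt.Println(out.Len(), out.At(0).Name())
}
```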
} @@ -2022,10 +2023,10 @@ func (m *metricMongodbatlasProcessIndexBtreeMissRatio) init() { m.data.SetName("mongodbatlas.process.index.btree_miss_ratio") m.data.SetDescription("Index miss ratio (%)") m.data.SetUnit("1") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) } -func (m *metricMongodbatlasProcessIndexBtreeMissRatio) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64) { +func (m *metricMongodbatlasProcessIndexBtreeMissRatio) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { if !m.settings.Enabled { return } @@ -2043,7 +2044,7 @@ func (m *metricMongodbatlasProcessIndexBtreeMissRatio) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbatlasProcessIndexBtreeMissRatio) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasProcessIndexBtreeMissRatio) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -2054,14 +2055,14 @@ func (m *metricMongodbatlasProcessIndexBtreeMissRatio) emit(metrics pdata.Metric func newMetricMongodbatlasProcessIndexBtreeMissRatio(settings MetricSettings) metricMongodbatlasProcessIndexBtreeMissRatio { m := metricMongodbatlasProcessIndexBtreeMissRatio{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasProcessIndexCounters struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -2071,11 +2072,11 @@ func (m *metricMongodbatlasProcessIndexCounters) init() { m.data.SetName("mongodbatlas.process.index.counters") m.data.SetDescription("Indexes") m.data.SetUnit("{indexes}") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbatlasProcessIndexCounters) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, btreeCounterTypeAttributeValue string) { +func (m *metricMongodbatlasProcessIndexCounters) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, btreeCounterTypeAttributeValue string) { if !m.settings.Enabled { return } @@ -2083,7 +2084,7 @@ func (m *metricMongodbatlasProcessIndexCounters) recordDataPoint(start pdata.Tim dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.BtreeCounterType, pdata.NewValueString(btreeCounterTypeAttributeValue)) + dp.Attributes().Insert(A.BtreeCounterType, pcommon.NewValueString(btreeCounterTypeAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -2094,7 +2095,7 @@ func (m *metricMongodbatlasProcessIndexCounters) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricMongodbatlasProcessIndexCounters) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasProcessIndexCounters) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -2105,14 +2106,14 @@ func (m *metricMongodbatlasProcessIndexCounters) emit(metrics pdata.MetricSlice) func newMetricMongodbatlasProcessIndexCounters(settings MetricSettings) metricMongodbatlasProcessIndexCounters { m := metricMongodbatlasProcessIndexCounters{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasProcessJournalingCommits struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -2122,10 +2123,10 @@ func (m *metricMongodbatlasProcessJournalingCommits) init() { m.data.SetName("mongodbatlas.process.journaling.commits") m.data.SetDescription("Journaling commits") m.data.SetUnit("{commits}") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) } -func (m *metricMongodbatlasProcessJournalingCommits) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64) { +func (m *metricMongodbatlasProcessJournalingCommits) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { if !m.settings.Enabled { return } @@ -2143,7 +2144,7 @@ func (m *metricMongodbatlasProcessJournalingCommits) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbatlasProcessJournalingCommits) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasProcessJournalingCommits) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -2154,14 +2155,14 @@ func (m *metricMongodbatlasProcessJournalingCommits) emit(metrics pdata.MetricSl func newMetricMongodbatlasProcessJournalingCommits(settings MetricSettings) metricMongodbatlasProcessJournalingCommits { m := metricMongodbatlasProcessJournalingCommits{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasProcessJournalingDataFiles struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -2171,10 +2172,10 @@ func (m *metricMongodbatlasProcessJournalingDataFiles) init() { m.data.SetName("mongodbatlas.process.journaling.data_files") m.data.SetDescription("Data file sizes") m.data.SetUnit("MiBy") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) } -func (m *metricMongodbatlasProcessJournalingDataFiles) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64) { +func (m *metricMongodbatlasProcessJournalingDataFiles) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { if !m.settings.Enabled { return } @@ -2192,7 +2193,7 @@ func (m *metricMongodbatlasProcessJournalingDataFiles) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbatlasProcessJournalingDataFiles) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasProcessJournalingDataFiles) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -2203,14 +2204,14 @@ func (m *metricMongodbatlasProcessJournalingDataFiles) emit(metrics pdata.Metric func newMetricMongodbatlasProcessJournalingDataFiles(settings MetricSettings) metricMongodbatlasProcessJournalingDataFiles { m := metricMongodbatlasProcessJournalingDataFiles{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasProcessJournalingWritten struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -2220,10 +2221,10 @@ func (m *metricMongodbatlasProcessJournalingWritten) init() { m.data.SetName("mongodbatlas.process.journaling.written") m.data.SetDescription("Journals written") m.data.SetUnit("MiBy") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) } -func (m *metricMongodbatlasProcessJournalingWritten) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64) { +func (m *metricMongodbatlasProcessJournalingWritten) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { if !m.settings.Enabled { return } @@ -2241,7 +2242,7 @@ func (m *metricMongodbatlasProcessJournalingWritten) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbatlasProcessJournalingWritten) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasProcessJournalingWritten) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -2252,14 +2253,14 @@ func (m *metricMongodbatlasProcessJournalingWritten) emit(metrics pdata.MetricSl func newMetricMongodbatlasProcessJournalingWritten(settings MetricSettings) metricMongodbatlasProcessJournalingWritten { m := metricMongodbatlasProcessJournalingWritten{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasProcessMemoryUsage struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. 
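Every `recordDataPoint` signature now takes `pcommon.Timestamp` instead of `pdata.Timestamp`; the type is the same nanoseconds-since-Unix-epoch value, only its import path changed. A small sketch of converting between it and `time.Time`, assuming the `NewTimestampFromTime`/`AsTime` helpers carried over from the old package:

```go
package main

import (
	"fmt"
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
)

func main() {
	ts := pcommon.NewTimestampFromTime(time.Date(2022, 4, 12, 0, 0, 0, 0, time.UTC))
	fmt.Println(uint64(ts))        // raw nanoseconds since the Unix epoch
	fmt.Println(ts.AsTime().UTC()) // back to a time.Time
}
```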
settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -2269,11 +2270,11 @@ func (m *metricMongodbatlasProcessMemoryUsage) init() { m.data.SetName("mongodbatlas.process.memory.usage") m.data.SetDescription("Memory Usage") m.data.SetUnit("By") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbatlasProcessMemoryUsage) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, memoryStateAttributeValue string) { +func (m *metricMongodbatlasProcessMemoryUsage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, memoryStateAttributeValue string) { if !m.settings.Enabled { return } @@ -2281,7 +2282,7 @@ func (m *metricMongodbatlasProcessMemoryUsage) recordDataPoint(start pdata.Times dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.MemoryState, pdata.NewValueString(memoryStateAttributeValue)) + dp.Attributes().Insert(A.MemoryState, pcommon.NewValueString(memoryStateAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -2292,7 +2293,7 @@ func (m *metricMongodbatlasProcessMemoryUsage) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbatlasProcessMemoryUsage) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasProcessMemoryUsage) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -2303,14 +2304,14 @@ func (m *metricMongodbatlasProcessMemoryUsage) emit(metrics pdata.MetricSlice) { func newMetricMongodbatlasProcessMemoryUsage(settings MetricSettings) metricMongodbatlasProcessMemoryUsage { m := metricMongodbatlasProcessMemoryUsage{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasProcessNetworkIo struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -2320,11 +2321,11 @@ func (m *metricMongodbatlasProcessNetworkIo) init() { m.data.SetName("mongodbatlas.process.network.io") m.data.SetDescription("Network IO") m.data.SetUnit("By/s") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbatlasProcessNetworkIo) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, directionAttributeValue string) { +func (m *metricMongodbatlasProcessNetworkIo) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, directionAttributeValue string) { if !m.settings.Enabled { return } @@ -2332,7 +2333,7 @@ func (m *metricMongodbatlasProcessNetworkIo) recordDataPoint(start pdata.Timesta dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.Direction, pdata.NewValueString(directionAttributeValue)) + dp.Attributes().Insert(A.Direction, pcommon.NewValueString(directionAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -2343,7 +2344,7 @@ func (m *metricMongodbatlasProcessNetworkIo) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbatlasProcessNetworkIo) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasProcessNetworkIo) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -2354,14 +2355,14 @@ func (m *metricMongodbatlasProcessNetworkIo) emit(metrics pdata.MetricSlice) { func newMetricMongodbatlasProcessNetworkIo(settings MetricSettings) metricMongodbatlasProcessNetworkIo { m := metricMongodbatlasProcessNetworkIo{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasProcessNetworkRequests struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -2371,12 +2372,12 @@ func (m *metricMongodbatlasProcessNetworkRequests) init() { m.data.SetName("mongodbatlas.process.network.requests") m.data.SetDescription("Network requests") m.data.SetUnit("{requests}") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) } -func (m *metricMongodbatlasProcessNetworkRequests) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64) { +func (m *metricMongodbatlasProcessNetworkRequests) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { if !m.settings.Enabled { return } @@ -2394,7 +2395,7 @@ func (m *metricMongodbatlasProcessNetworkRequests) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricMongodbatlasProcessNetworkRequests) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasProcessNetworkRequests) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -2405,14 +2406,14 @@ func (m *metricMongodbatlasProcessNetworkRequests) emit(metrics pdata.MetricSlic func newMetricMongodbatlasProcessNetworkRequests(settings MetricSettings) metricMongodbatlasProcessNetworkRequests { m := metricMongodbatlasProcessNetworkRequests{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasProcessOplogRate struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -2422,10 +2423,10 @@ func (m *metricMongodbatlasProcessOplogRate) init() { m.data.SetName("mongodbatlas.process.oplog.rate") m.data.SetDescription("Execution rate by operation") m.data.SetUnit("GiBy/h") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) } -func (m *metricMongodbatlasProcessOplogRate) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64) { +func (m *metricMongodbatlasProcessOplogRate) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { if !m.settings.Enabled { return } @@ -2443,7 +2444,7 @@ func (m *metricMongodbatlasProcessOplogRate) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbatlasProcessOplogRate) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasProcessOplogRate) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -2454,14 +2455,14 @@ func (m *metricMongodbatlasProcessOplogRate) emit(metrics pdata.MetricSlice) { func newMetricMongodbatlasProcessOplogRate(settings MetricSettings) metricMongodbatlasProcessOplogRate { m := metricMongodbatlasProcessOplogRate{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasProcessOplogTime struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -2471,11 +2472,11 @@ func (m *metricMongodbatlasProcessOplogTime) init() { m.data.SetName("mongodbatlas.process.oplog.time") m.data.SetDescription("Execution time by operation") m.data.SetUnit("s") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbatlasProcessOplogTime) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, oplogTypeAttributeValue string) { +func (m *metricMongodbatlasProcessOplogTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, oplogTypeAttributeValue string) { if !m.settings.Enabled { return } @@ -2483,7 +2484,7 @@ func (m *metricMongodbatlasProcessOplogTime) recordDataPoint(start pdata.Timesta dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.OplogType, pdata.NewValueString(oplogTypeAttributeValue)) + dp.Attributes().Insert(A.OplogType, pcommon.NewValueString(oplogTypeAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -2494,7 +2495,7 @@ func (m *metricMongodbatlasProcessOplogTime) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbatlasProcessOplogTime) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasProcessOplogTime) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -2505,14 +2506,14 @@ func (m *metricMongodbatlasProcessOplogTime) emit(metrics pdata.MetricSlice) { func newMetricMongodbatlasProcessOplogTime(settings MetricSettings) metricMongodbatlasProcessOplogTime { m := metricMongodbatlasProcessOplogTime{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasProcessPageFaults struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -2522,11 +2523,11 @@ func (m *metricMongodbatlasProcessPageFaults) init() { m.data.SetName("mongodbatlas.process.page_faults") m.data.SetDescription("Page faults") m.data.SetUnit("{faults}/s") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbatlasProcessPageFaults) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, memoryIssueTypeAttributeValue string) { +func (m *metricMongodbatlasProcessPageFaults) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, memoryIssueTypeAttributeValue string) { if !m.settings.Enabled { return } @@ -2534,7 +2535,7 @@ func (m *metricMongodbatlasProcessPageFaults) recordDataPoint(start pdata.Timest dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.MemoryIssueType, pdata.NewValueString(memoryIssueTypeAttributeValue)) + dp.Attributes().Insert(A.MemoryIssueType, pcommon.NewValueString(memoryIssueTypeAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. 
@@ -2545,7 +2546,7 @@ func (m *metricMongodbatlasProcessPageFaults) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbatlasProcessPageFaults) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasProcessPageFaults) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -2556,14 +2557,14 @@ func (m *metricMongodbatlasProcessPageFaults) emit(metrics pdata.MetricSlice) { func newMetricMongodbatlasProcessPageFaults(settings MetricSettings) metricMongodbatlasProcessPageFaults { m := metricMongodbatlasProcessPageFaults{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasProcessRestarts struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -2573,10 +2574,10 @@ func (m *metricMongodbatlasProcessRestarts) init() { m.data.SetName("mongodbatlas.process.restarts") m.data.SetDescription("Restarts in last hour") m.data.SetUnit("{restarts}/h") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) } -func (m *metricMongodbatlasProcessRestarts) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64) { +func (m *metricMongodbatlasProcessRestarts) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { if !m.settings.Enabled { return } @@ -2594,7 +2595,7 @@ func (m *metricMongodbatlasProcessRestarts) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbatlasProcessRestarts) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasProcessRestarts) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -2605,14 +2606,14 @@ func (m *metricMongodbatlasProcessRestarts) emit(metrics pdata.MetricSlice) { func newMetricMongodbatlasProcessRestarts(settings MetricSettings) metricMongodbatlasProcessRestarts { m := metricMongodbatlasProcessRestarts{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasProcessTickets struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
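The `if !m.settings.Enabled { return }` guard at the top of each `recordDataPoint` is what lets users turn individual metrics off in the receiver config. A simplified sketch of that gate; `MetricSettings` and `gaugeRecorder` here are local stand-ins mirroring the generated types, not imports from the receiver package:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"
)

// MetricSettings mirrors the generated struct's Enabled flag only.
type MetricSettings struct {
	Enabled bool
}

type gaugeRecorder struct {
	data     pmetric.Metric
	settings MetricSettings
}

func (g *gaugeRecorder) record(ts pcommon.Timestamp, val float64) {
	if !g.settings.Enabled {
		return // disabled metrics are silently skipped, as in the generated code
	}
	dp := g.data.Gauge().DataPoints().AppendEmpty()
	dp.SetTimestamp(ts)
	dp.SetDoubleVal(val)
}

func main() {
	r := gaugeRecorder{data: pmetric.NewMetric(), settings: MetricSettings{Enabled: true}}
	r.data.SetDataType(pmetric.MetricDataTypeGauge)
	r.record(pcommon.Timestamp(0), 3.14)
	fmt.Println(r.data.Gauge().DataPoints().Len())
}
```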
} @@ -2622,11 +2623,11 @@ func (m *metricMongodbatlasProcessTickets) init() { m.data.SetName("mongodbatlas.process.tickets") m.data.SetDescription("Tickets") m.data.SetUnit("{tickets}") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbatlasProcessTickets) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, ticketTypeAttributeValue string) { +func (m *metricMongodbatlasProcessTickets) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, ticketTypeAttributeValue string) { if !m.settings.Enabled { return } @@ -2634,7 +2635,7 @@ func (m *metricMongodbatlasProcessTickets) recordDataPoint(start pdata.Timestamp dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.TicketType, pdata.NewValueString(ticketTypeAttributeValue)) + dp.Attributes().Insert(A.TicketType, pcommon.NewValueString(ticketTypeAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -2645,7 +2646,7 @@ func (m *metricMongodbatlasProcessTickets) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbatlasProcessTickets) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasProcessTickets) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -2656,14 +2657,14 @@ func (m *metricMongodbatlasProcessTickets) emit(metrics pdata.MetricSlice) { func newMetricMongodbatlasProcessTickets(settings MetricSettings) metricMongodbatlasProcessTickets { m := metricMongodbatlasProcessTickets{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasSystemCPUNormalizedUsageAverage struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -2673,11 +2674,11 @@ func (m *metricMongodbatlasSystemCPUNormalizedUsageAverage) init() { m.data.SetName("mongodbatlas.system.cpu.normalized.usage.average") m.data.SetDescription("System CPU Normalized to pct") m.data.SetUnit("1") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbatlasSystemCPUNormalizedUsageAverage) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, cpuStateAttributeValue string) { +func (m *metricMongodbatlasSystemCPUNormalizedUsageAverage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) { if !m.settings.Enabled { return } @@ -2685,7 +2686,7 @@ func (m *metricMongodbatlasSystemCPUNormalizedUsageAverage) recordDataPoint(star dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.CPUState, pdata.NewValueString(cpuStateAttributeValue)) + dp.Attributes().Insert(A.CPUState, pcommon.NewValueString(cpuStateAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. 
@@ -2696,7 +2697,7 @@ func (m *metricMongodbatlasSystemCPUNormalizedUsageAverage) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbatlasSystemCPUNormalizedUsageAverage) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasSystemCPUNormalizedUsageAverage) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -2707,14 +2708,14 @@ func (m *metricMongodbatlasSystemCPUNormalizedUsageAverage) emit(metrics pdata.M func newMetricMongodbatlasSystemCPUNormalizedUsageAverage(settings MetricSettings) metricMongodbatlasSystemCPUNormalizedUsageAverage { m := metricMongodbatlasSystemCPUNormalizedUsageAverage{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasSystemCPUNormalizedUsageMax struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -2724,11 +2725,11 @@ func (m *metricMongodbatlasSystemCPUNormalizedUsageMax) init() { m.data.SetName("mongodbatlas.system.cpu.normalized.usage.max") m.data.SetDescription("System CPU Normalized to pct") m.data.SetUnit("1") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbatlasSystemCPUNormalizedUsageMax) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, cpuStateAttributeValue string) { +func (m *metricMongodbatlasSystemCPUNormalizedUsageMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) { if !m.settings.Enabled { return } @@ -2736,7 +2737,7 @@ func (m *metricMongodbatlasSystemCPUNormalizedUsageMax) recordDataPoint(start pd dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.CPUState, pdata.NewValueString(cpuStateAttributeValue)) + dp.Attributes().Insert(A.CPUState, pcommon.NewValueString(cpuStateAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -2747,7 +2748,7 @@ func (m *metricMongodbatlasSystemCPUNormalizedUsageMax) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbatlasSystemCPUNormalizedUsageMax) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasSystemCPUNormalizedUsageMax) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -2758,14 +2759,14 @@ func (m *metricMongodbatlasSystemCPUNormalizedUsageMax) emit(metrics pdata.Metri func newMetricMongodbatlasSystemCPUNormalizedUsageMax(settings MetricSettings) metricMongodbatlasSystemCPUNormalizedUsageMax { m := metricMongodbatlasSystemCPUNormalizedUsageMax{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasSystemCPUUsageAverage struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. 
settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -2775,11 +2776,11 @@ func (m *metricMongodbatlasSystemCPUUsageAverage) init() { m.data.SetName("mongodbatlas.system.cpu.usage.average") m.data.SetDescription("System CPU Usage (%)") m.data.SetUnit("1") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbatlasSystemCPUUsageAverage) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, cpuStateAttributeValue string) { +func (m *metricMongodbatlasSystemCPUUsageAverage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) { if !m.settings.Enabled { return } @@ -2787,7 +2788,7 @@ func (m *metricMongodbatlasSystemCPUUsageAverage) recordDataPoint(start pdata.Ti dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.CPUState, pdata.NewValueString(cpuStateAttributeValue)) + dp.Attributes().Insert(A.CPUState, pcommon.NewValueString(cpuStateAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -2798,7 +2799,7 @@ func (m *metricMongodbatlasSystemCPUUsageAverage) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbatlasSystemCPUUsageAverage) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasSystemCPUUsageAverage) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -2809,14 +2810,14 @@ func (m *metricMongodbatlasSystemCPUUsageAverage) emit(metrics pdata.MetricSlice func newMetricMongodbatlasSystemCPUUsageAverage(settings MetricSettings) metricMongodbatlasSystemCPUUsageAverage { m := metricMongodbatlasSystemCPUUsageAverage{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasSystemCPUUsageMax struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -2826,11 +2827,11 @@ func (m *metricMongodbatlasSystemCPUUsageMax) init() { m.data.SetName("mongodbatlas.system.cpu.usage.max") m.data.SetDescription("System CPU Usage (%)") m.data.SetUnit("1") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbatlasSystemCPUUsageMax) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, cpuStateAttributeValue string) { +func (m *metricMongodbatlasSystemCPUUsageMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) { if !m.settings.Enabled { return } @@ -2838,7 +2839,7 @@ func (m *metricMongodbatlasSystemCPUUsageMax) recordDataPoint(start pdata.Timest dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.CPUState, pdata.NewValueString(cpuStateAttributeValue)) + dp.Attributes().Insert(A.CPUState, pcommon.NewValueString(cpuStateAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -2849,7 +2850,7 @@ func (m *metricMongodbatlasSystemCPUUsageMax) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbatlasSystemCPUUsageMax) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasSystemCPUUsageMax) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -2860,14 +2861,14 @@ func (m *metricMongodbatlasSystemCPUUsageMax) emit(metrics pdata.MetricSlice) { func newMetricMongodbatlasSystemCPUUsageMax(settings MetricSettings) metricMongodbatlasSystemCPUUsageMax { m := metricMongodbatlasSystemCPUUsageMax{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasSystemFtsCPUNormalizedUsage struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -2877,11 +2878,11 @@ func (m *metricMongodbatlasSystemFtsCPUNormalizedUsage) init() { m.data.SetName("mongodbatlas.system.fts.cpu.normalized.usage") m.data.SetDescription("Full text search disk usage (%)") m.data.SetUnit("1") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbatlasSystemFtsCPUNormalizedUsage) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, cpuStateAttributeValue string) { +func (m *metricMongodbatlasSystemFtsCPUNormalizedUsage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) { if !m.settings.Enabled { return } @@ -2889,7 +2890,7 @@ func (m *metricMongodbatlasSystemFtsCPUNormalizedUsage) recordDataPoint(start pd dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.CPUState, pdata.NewValueString(cpuStateAttributeValue)) + dp.Attributes().Insert(A.CPUState, pcommon.NewValueString(cpuStateAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. 
@@ -2900,7 +2901,7 @@ func (m *metricMongodbatlasSystemFtsCPUNormalizedUsage) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbatlasSystemFtsCPUNormalizedUsage) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasSystemFtsCPUNormalizedUsage) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -2911,14 +2912,14 @@ func (m *metricMongodbatlasSystemFtsCPUNormalizedUsage) emit(metrics pdata.Metri func newMetricMongodbatlasSystemFtsCPUNormalizedUsage(settings MetricSettings) metricMongodbatlasSystemFtsCPUNormalizedUsage { m := metricMongodbatlasSystemFtsCPUNormalizedUsage{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasSystemFtsCPUUsage struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -2928,11 +2929,11 @@ func (m *metricMongodbatlasSystemFtsCPUUsage) init() { m.data.SetName("mongodbatlas.system.fts.cpu.usage") m.data.SetDescription("Full-text search (%)") m.data.SetUnit("1") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbatlasSystemFtsCPUUsage) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, cpuStateAttributeValue string) { +func (m *metricMongodbatlasSystemFtsCPUUsage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) { if !m.settings.Enabled { return } @@ -2940,7 +2941,7 @@ func (m *metricMongodbatlasSystemFtsCPUUsage) recordDataPoint(start pdata.Timest dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.CPUState, pdata.NewValueString(cpuStateAttributeValue)) + dp.Attributes().Insert(A.CPUState, pcommon.NewValueString(cpuStateAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -2951,7 +2952,7 @@ func (m *metricMongodbatlasSystemFtsCPUUsage) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbatlasSystemFtsCPUUsage) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasSystemFtsCPUUsage) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -2962,14 +2963,14 @@ func (m *metricMongodbatlasSystemFtsCPUUsage) emit(metrics pdata.MetricSlice) { func newMetricMongodbatlasSystemFtsCPUUsage(settings MetricSettings) metricMongodbatlasSystemFtsCPUUsage { m := metricMongodbatlasSystemFtsCPUUsage{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasSystemFtsDiskUsed struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -2979,10 +2980,10 @@ func (m *metricMongodbatlasSystemFtsDiskUsed) init() { m.data.SetName("mongodbatlas.system.fts.disk.used") m.data.SetDescription("Full text search disk usage") m.data.SetUnit("By") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) } -func (m *metricMongodbatlasSystemFtsDiskUsed) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64) { +func (m *metricMongodbatlasSystemFtsDiskUsed) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { if !m.settings.Enabled { return } @@ -3000,7 +3001,7 @@ func (m *metricMongodbatlasSystemFtsDiskUsed) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbatlasSystemFtsDiskUsed) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasSystemFtsDiskUsed) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -3011,14 +3012,14 @@ func (m *metricMongodbatlasSystemFtsDiskUsed) emit(metrics pdata.MetricSlice) { func newMetricMongodbatlasSystemFtsDiskUsed(settings MetricSettings) metricMongodbatlasSystemFtsDiskUsed { m := metricMongodbatlasSystemFtsDiskUsed{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasSystemFtsMemoryUsage struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -3028,13 +3029,13 @@ func (m *metricMongodbatlasSystemFtsMemoryUsage) init() { m.data.SetName("mongodbatlas.system.fts.memory.usage") m.data.SetDescription("Full-text search") m.data.SetUnit("MiBy") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbatlasSystemFtsMemoryUsage) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, memoryStateAttributeValue string) { +func (m *metricMongodbatlasSystemFtsMemoryUsage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, memoryStateAttributeValue string) { if !m.settings.Enabled { return } @@ -3042,7 +3043,7 @@ func (m *metricMongodbatlasSystemFtsMemoryUsage) recordDataPoint(start pdata.Tim dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.MemoryState, pdata.NewValueString(memoryStateAttributeValue)) + dp.Attributes().Insert(A.MemoryState, pcommon.NewValueString(memoryStateAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -3053,7 +3054,7 @@ func (m *metricMongodbatlasSystemFtsMemoryUsage) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
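The `capacity` field and `updateCapacity()` bookkeeping seen in every metric type exist so that `EnsureCapacity` can pre-size the data point slice to the largest count observed in earlier scrapes, avoiding re-allocations. A rough sketch of that interplay under the new package names; `counterBuf` is a hypothetical stand-in for the generated per-metric structs:

```go
package main

import "go.opentelemetry.io/collector/pdata/pmetric"

type counterBuf struct {
	data     pmetric.Metric
	capacity int // max observed number of data points, fed back into EnsureCapacity
}

func (c *counterBuf) updateCapacity() {
	if c.data.Gauge().DataPoints().Len() > c.capacity {
		c.capacity = c.data.Gauge().DataPoints().Len()
	}
}

func main() {
	c := counterBuf{data: pmetric.NewMetric()}
	c.data.SetDataType(pmetric.MetricDataTypeGauge)
	c.data.Gauge().DataPoints().EnsureCapacity(c.capacity)
	for i := 0; i < 3; i++ {
		c.data.Gauge().DataPoints().AppendEmpty().SetDoubleVal(float64(i))
	}
	c.updateCapacity() // capacity is now 3; the next init() would EnsureCapacity(3)
}
```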
-func (m *metricMongodbatlasSystemFtsMemoryUsage) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasSystemFtsMemoryUsage) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -3064,14 +3065,14 @@ func (m *metricMongodbatlasSystemFtsMemoryUsage) emit(metrics pdata.MetricSlice) func newMetricMongodbatlasSystemFtsMemoryUsage(settings MetricSettings) metricMongodbatlasSystemFtsMemoryUsage { m := metricMongodbatlasSystemFtsMemoryUsage{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasSystemMemoryUsageAverage struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -3081,11 +3082,11 @@ func (m *metricMongodbatlasSystemMemoryUsageAverage) init() { m.data.SetName("mongodbatlas.system.memory.usage.average") m.data.SetDescription("System Memory Usage") m.data.SetUnit("KiBy") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbatlasSystemMemoryUsageAverage) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, memoryStatusAttributeValue string) { +func (m *metricMongodbatlasSystemMemoryUsageAverage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, memoryStatusAttributeValue string) { if !m.settings.Enabled { return } @@ -3093,7 +3094,7 @@ func (m *metricMongodbatlasSystemMemoryUsageAverage) recordDataPoint(start pdata dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.MemoryStatus, pdata.NewValueString(memoryStatusAttributeValue)) + dp.Attributes().Insert(A.MemoryStatus, pcommon.NewValueString(memoryStatusAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -3104,7 +3105,7 @@ func (m *metricMongodbatlasSystemMemoryUsageAverage) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbatlasSystemMemoryUsageAverage) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasSystemMemoryUsageAverage) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -3115,14 +3116,14 @@ func (m *metricMongodbatlasSystemMemoryUsageAverage) emit(metrics pdata.MetricSl func newMetricMongodbatlasSystemMemoryUsageAverage(settings MetricSettings) metricMongodbatlasSystemMemoryUsageAverage { m := metricMongodbatlasSystemMemoryUsageAverage{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasSystemMemoryUsageMax struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -3132,11 +3133,11 @@ func (m *metricMongodbatlasSystemMemoryUsageMax) init() { m.data.SetName("mongodbatlas.system.memory.usage.max") m.data.SetDescription("System Memory Usage") m.data.SetUnit("KiBy") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbatlasSystemMemoryUsageMax) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, memoryStatusAttributeValue string) { +func (m *metricMongodbatlasSystemMemoryUsageMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, memoryStatusAttributeValue string) { if !m.settings.Enabled { return } @@ -3144,7 +3145,7 @@ func (m *metricMongodbatlasSystemMemoryUsageMax) recordDataPoint(start pdata.Tim dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.MemoryStatus, pdata.NewValueString(memoryStatusAttributeValue)) + dp.Attributes().Insert(A.MemoryStatus, pcommon.NewValueString(memoryStatusAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -3155,7 +3156,7 @@ func (m *metricMongodbatlasSystemMemoryUsageMax) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbatlasSystemMemoryUsageMax) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasSystemMemoryUsageMax) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -3166,14 +3167,14 @@ func (m *metricMongodbatlasSystemMemoryUsageMax) emit(metrics pdata.MetricSlice) func newMetricMongodbatlasSystemMemoryUsageMax(settings MetricSettings) metricMongodbatlasSystemMemoryUsageMax { m := metricMongodbatlasSystemMemoryUsageMax{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasSystemNetworkIoAverage struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -3183,11 +3184,11 @@ func (m *metricMongodbatlasSystemNetworkIoAverage) init() { m.data.SetName("mongodbatlas.system.network.io.average") m.data.SetDescription("System Network IO") m.data.SetUnit("By/s") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbatlasSystemNetworkIoAverage) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, directionAttributeValue string) { +func (m *metricMongodbatlasSystemNetworkIoAverage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, directionAttributeValue string) { if !m.settings.Enabled { return } @@ -3195,7 +3196,7 @@ func (m *metricMongodbatlasSystemNetworkIoAverage) recordDataPoint(start pdata.T dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.Direction, pdata.NewValueString(directionAttributeValue)) + dp.Attributes().Insert(A.Direction, pcommon.NewValueString(directionAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. 
@@ -3206,7 +3207,7 @@ func (m *metricMongodbatlasSystemNetworkIoAverage) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbatlasSystemNetworkIoAverage) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasSystemNetworkIoAverage) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -3217,14 +3218,14 @@ func (m *metricMongodbatlasSystemNetworkIoAverage) emit(metrics pdata.MetricSlic func newMetricMongodbatlasSystemNetworkIoAverage(settings MetricSettings) metricMongodbatlasSystemNetworkIoAverage { m := metricMongodbatlasSystemNetworkIoAverage{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasSystemNetworkIoMax struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -3234,11 +3235,11 @@ func (m *metricMongodbatlasSystemNetworkIoMax) init() { m.data.SetName("mongodbatlas.system.network.io.max") m.data.SetDescription("System Network IO") m.data.SetUnit("By/s") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbatlasSystemNetworkIoMax) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, directionAttributeValue string) { +func (m *metricMongodbatlasSystemNetworkIoMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, directionAttributeValue string) { if !m.settings.Enabled { return } @@ -3246,7 +3247,7 @@ func (m *metricMongodbatlasSystemNetworkIoMax) recordDataPoint(start pdata.Times dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.Direction, pdata.NewValueString(directionAttributeValue)) + dp.Attributes().Insert(A.Direction, pcommon.NewValueString(directionAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -3257,7 +3258,7 @@ func (m *metricMongodbatlasSystemNetworkIoMax) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbatlasSystemNetworkIoMax) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasSystemNetworkIoMax) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -3268,14 +3269,14 @@ func (m *metricMongodbatlasSystemNetworkIoMax) emit(metrics pdata.MetricSlice) { func newMetricMongodbatlasSystemNetworkIoMax(settings MetricSettings) metricMongodbatlasSystemNetworkIoMax { m := metricMongodbatlasSystemNetworkIoMax{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasSystemPagingIoAverage struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -3285,11 +3286,11 @@ func (m *metricMongodbatlasSystemPagingIoAverage) init() { m.data.SetName("mongodbatlas.system.paging.io.average") m.data.SetDescription("Swap IO") m.data.SetUnit("{pages}/s") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbatlasSystemPagingIoAverage) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, directionAttributeValue string) { +func (m *metricMongodbatlasSystemPagingIoAverage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, directionAttributeValue string) { if !m.settings.Enabled { return } @@ -3297,7 +3298,7 @@ func (m *metricMongodbatlasSystemPagingIoAverage) recordDataPoint(start pdata.Ti dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.Direction, pdata.NewValueString(directionAttributeValue)) + dp.Attributes().Insert(A.Direction, pcommon.NewValueString(directionAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -3308,7 +3309,7 @@ func (m *metricMongodbatlasSystemPagingIoAverage) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbatlasSystemPagingIoAverage) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasSystemPagingIoAverage) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -3319,14 +3320,14 @@ func (m *metricMongodbatlasSystemPagingIoAverage) emit(metrics pdata.MetricSlice func newMetricMongodbatlasSystemPagingIoAverage(settings MetricSettings) metricMongodbatlasSystemPagingIoAverage { m := metricMongodbatlasSystemPagingIoAverage{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasSystemPagingIoMax struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -3336,11 +3337,11 @@ func (m *metricMongodbatlasSystemPagingIoMax) init() { m.data.SetName("mongodbatlas.system.paging.io.max") m.data.SetDescription("Swap IO") m.data.SetUnit("{pages}/s") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbatlasSystemPagingIoMax) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, directionAttributeValue string) { +func (m *metricMongodbatlasSystemPagingIoMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, directionAttributeValue string) { if !m.settings.Enabled { return } @@ -3348,7 +3349,7 @@ func (m *metricMongodbatlasSystemPagingIoMax) recordDataPoint(start pdata.Timest dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.Direction, pdata.NewValueString(directionAttributeValue)) + dp.Attributes().Insert(A.Direction, pcommon.NewValueString(directionAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. 
@@ -3359,7 +3360,7 @@ func (m *metricMongodbatlasSystemPagingIoMax) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbatlasSystemPagingIoMax) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasSystemPagingIoMax) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -3370,14 +3371,14 @@ func (m *metricMongodbatlasSystemPagingIoMax) emit(metrics pdata.MetricSlice) { func newMetricMongodbatlasSystemPagingIoMax(settings MetricSettings) metricMongodbatlasSystemPagingIoMax { m := metricMongodbatlasSystemPagingIoMax{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasSystemPagingUsageAverage struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -3387,11 +3388,11 @@ func (m *metricMongodbatlasSystemPagingUsageAverage) init() { m.data.SetName("mongodbatlas.system.paging.usage.average") m.data.SetDescription("Swap usage") m.data.SetUnit("KiBy") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbatlasSystemPagingUsageAverage) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, directionAttributeValue string) { +func (m *metricMongodbatlasSystemPagingUsageAverage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, directionAttributeValue string) { if !m.settings.Enabled { return } @@ -3399,7 +3400,7 @@ func (m *metricMongodbatlasSystemPagingUsageAverage) recordDataPoint(start pdata dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.Direction, pdata.NewValueString(directionAttributeValue)) + dp.Attributes().Insert(A.Direction, pcommon.NewValueString(directionAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -3410,7 +3411,7 @@ func (m *metricMongodbatlasSystemPagingUsageAverage) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbatlasSystemPagingUsageAverage) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasSystemPagingUsageAverage) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -3421,14 +3422,14 @@ func (m *metricMongodbatlasSystemPagingUsageAverage) emit(metrics pdata.MetricSl func newMetricMongodbatlasSystemPagingUsageAverage(settings MetricSettings) metricMongodbatlasSystemPagingUsageAverage { m := metricMongodbatlasSystemPagingUsageAverage{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbatlasSystemPagingUsageMax struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -3438,11 +3439,11 @@ func (m *metricMongodbatlasSystemPagingUsageMax) init() { m.data.SetName("mongodbatlas.system.paging.usage.max") m.data.SetDescription("Swap usage") m.data.SetUnit("KiBy") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbatlasSystemPagingUsageMax) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, directionAttributeValue string) { +func (m *metricMongodbatlasSystemPagingUsageMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, directionAttributeValue string) { if !m.settings.Enabled { return } @@ -3450,7 +3451,7 @@ func (m *metricMongodbatlasSystemPagingUsageMax) recordDataPoint(start pdata.Tim dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.Direction, pdata.NewValueString(directionAttributeValue)) + dp.Attributes().Insert(A.Direction, pcommon.NewValueString(directionAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -3461,7 +3462,7 @@ func (m *metricMongodbatlasSystemPagingUsageMax) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbatlasSystemPagingUsageMax) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbatlasSystemPagingUsageMax) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -3472,7 +3473,7 @@ func (m *metricMongodbatlasSystemPagingUsageMax) emit(metrics pdata.MetricSlice) func newMetricMongodbatlasSystemPagingUsageMax(settings MetricSettings) metricMongodbatlasSystemPagingUsageMax { m := metricMongodbatlasSystemPagingUsageMax{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m @@ -3481,10 +3482,10 @@ func newMetricMongodbatlasSystemPagingUsageMax(settings MetricSettings) metricMo // MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations // required to produce metric representation defined in metadata and user settings. type MetricsBuilder struct { - startTime pdata.Timestamp // start time that will be applied to all recorded data points. - metricsCapacity int // maximum observed number of metrics per resource. - resourceCapacity int // maximum observed number of resource attributes. - metricsBuffer pdata.Metrics // accumulates metrics data before emitting. + startTime pcommon.Timestamp // start time that will be applied to all recorded data points. + metricsCapacity int // maximum observed number of metrics per resource. + resourceCapacity int // maximum observed number of resource attributes. + metricsBuffer pmetric.Metrics // accumulates metrics data before emitting. metricMongodbatlasDbCounts metricMongodbatlasDbCounts metricMongodbatlasDbSize metricMongodbatlasDbSize metricMongodbatlasDiskPartitionIopsAverage metricMongodbatlasDiskPartitionIopsAverage @@ -3554,7 +3555,7 @@ type MetricsBuilder struct { type metricBuilderOption func(*MetricsBuilder) // WithStartTime sets startTime on the metrics builder. 
-func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { +func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption { return func(mb *MetricsBuilder) { mb.startTime = startTime } @@ -3562,8 +3563,8 @@ func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ - startTime: pdata.NewTimestampFromTime(time.Now()), - metricsBuffer: pdata.NewMetrics(), + startTime: pcommon.NewTimestampFromTime(time.Now()), + metricsBuffer: pmetric.NewMetrics(), metricMongodbatlasDbCounts: newMetricMongodbatlasDbCounts(settings.MongodbatlasDbCounts), metricMongodbatlasDbSize: newMetricMongodbatlasDbSize(settings.MongodbatlasDbSize), metricMongodbatlasDiskPartitionIopsAverage: newMetricMongodbatlasDiskPartitionIopsAverage(settings.MongodbatlasDiskPartitionIopsAverage), @@ -3635,7 +3636,7 @@ func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) } // updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity. -func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { +func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) { if mb.metricsCapacity < rm.ScopeMetrics().At(0).Metrics().Len() { mb.metricsCapacity = rm.ScopeMetrics().At(0).Metrics().Len() } @@ -3645,67 +3646,67 @@ func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { } // ResourceOption applies changes to provided resource. -type ResourceOption func(pdata.Resource) +type ResourceOption func(pcommon.Resource) // WithMongodbAtlasDbName sets provided value as "mongodb_atlas.db.name" attribute for current resource. func WithMongodbAtlasDbName(val string) ResourceOption { - return func(r pdata.Resource) { + return func(r pcommon.Resource) { r.Attributes().UpsertString("mongodb_atlas.db.name", val) } } // WithMongodbAtlasDiskPartition sets provided value as "mongodb_atlas.disk.partition" attribute for current resource. func WithMongodbAtlasDiskPartition(val string) ResourceOption { - return func(r pdata.Resource) { + return func(r pcommon.Resource) { r.Attributes().UpsertString("mongodb_atlas.disk.partition", val) } } // WithMongodbAtlasHostName sets provided value as "mongodb_atlas.host.name" attribute for current resource. func WithMongodbAtlasHostName(val string) ResourceOption { - return func(r pdata.Resource) { + return func(r pcommon.Resource) { r.Attributes().UpsertString("mongodb_atlas.host.name", val) } } // WithMongodbAtlasOrgName sets provided value as "mongodb_atlas.org_name" attribute for current resource. func WithMongodbAtlasOrgName(val string) ResourceOption { - return func(r pdata.Resource) { + return func(r pcommon.Resource) { r.Attributes().UpsertString("mongodb_atlas.org_name", val) } } // WithMongodbAtlasProcessID sets provided value as "mongodb_atlas.process.id" attribute for current resource. func WithMongodbAtlasProcessID(val string) ResourceOption { - return func(r pdata.Resource) { + return func(r pcommon.Resource) { r.Attributes().UpsertString("mongodb_atlas.process.id", val) } } // WithMongodbAtlasProcessPort sets provided value as "mongodb_atlas.process.port" attribute for current resource. 
func WithMongodbAtlasProcessPort(val string) ResourceOption { - return func(r pdata.Resource) { + return func(r pcommon.Resource) { r.Attributes().UpsertString("mongodb_atlas.process.port", val) } } // WithMongodbAtlasProcessTypeName sets provided value as "mongodb_atlas.process.type_name" attribute for current resource. func WithMongodbAtlasProcessTypeName(val string) ResourceOption { - return func(r pdata.Resource) { + return func(r pcommon.Resource) { r.Attributes().UpsertString("mongodb_atlas.process.type_name", val) } } // WithMongodbAtlasProjectID sets provided value as "mongodb_atlas.project.id" attribute for current resource. func WithMongodbAtlasProjectID(val string) ResourceOption { - return func(r pdata.Resource) { + return func(r pcommon.Resource) { r.Attributes().UpsertString("mongodb_atlas.project.id", val) } } // WithMongodbAtlasProjectName sets provided value as "mongodb_atlas.project.name" attribute for current resource. func WithMongodbAtlasProjectName(val string) ResourceOption { - return func(r pdata.Resource) { + return func(r pcommon.Resource) { r.Attributes().UpsertString("mongodb_atlas.project.name", val) } } @@ -3715,7 +3716,7 @@ func WithMongodbAtlasProjectName(val string) ResourceOption { // needs to emit metrics from several resources. Otherwise calling this function is not required, // just `Emit` function can be called instead. Resource attributes should be provided as ResourceOption arguments. func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { - rm := pdata.NewResourceMetrics() + rm := pmetric.NewResourceMetrics() rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity) for _, op := range ro { op(rm.Resource()) @@ -3795,332 +3796,332 @@ func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { // Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for // recording another set of metrics. This function will be responsible for applying all the transformations required to // produce metric representation defined in metadata and user settings, e.g. delta or cumulative. -func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pdata.Metrics { +func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pmetric.Metrics { mb.EmitForResource(ro...) - metrics := pdata.NewMetrics() + metrics := pmetric.NewMetrics() mb.metricsBuffer.MoveTo(metrics) return metrics } // RecordMongodbatlasDbCountsDataPoint adds a data point to mongodbatlas.db.counts metric. -func (mb *MetricsBuilder) RecordMongodbatlasDbCountsDataPoint(ts pdata.Timestamp, val float64, objectTypeAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbatlasDbCountsDataPoint(ts pcommon.Timestamp, val float64, objectTypeAttributeValue string) { mb.metricMongodbatlasDbCounts.recordDataPoint(mb.startTime, ts, val, objectTypeAttributeValue) } // RecordMongodbatlasDbSizeDataPoint adds a data point to mongodbatlas.db.size metric. -func (mb *MetricsBuilder) RecordMongodbatlasDbSizeDataPoint(ts pdata.Timestamp, val float64, objectTypeAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbatlasDbSizeDataPoint(ts pcommon.Timestamp, val float64, objectTypeAttributeValue string) { mb.metricMongodbatlasDbSize.recordDataPoint(mb.startTime, ts, val, objectTypeAttributeValue) } // RecordMongodbatlasDiskPartitionIopsAverageDataPoint adds a data point to mongodbatlas.disk.partition.iops.average metric. 
-func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionIopsAverageDataPoint(ts pdata.Timestamp, val float64, diskDirectionAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionIopsAverageDataPoint(ts pcommon.Timestamp, val float64, diskDirectionAttributeValue string) { mb.metricMongodbatlasDiskPartitionIopsAverage.recordDataPoint(mb.startTime, ts, val, diskDirectionAttributeValue) } // RecordMongodbatlasDiskPartitionIopsMaxDataPoint adds a data point to mongodbatlas.disk.partition.iops.max metric. -func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionIopsMaxDataPoint(ts pdata.Timestamp, val float64, diskDirectionAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionIopsMaxDataPoint(ts pcommon.Timestamp, val float64, diskDirectionAttributeValue string) { mb.metricMongodbatlasDiskPartitionIopsMax.recordDataPoint(mb.startTime, ts, val, diskDirectionAttributeValue) } // RecordMongodbatlasDiskPartitionLatencyAverageDataPoint adds a data point to mongodbatlas.disk.partition.latency.average metric. -func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionLatencyAverageDataPoint(ts pdata.Timestamp, val float64, diskDirectionAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionLatencyAverageDataPoint(ts pcommon.Timestamp, val float64, diskDirectionAttributeValue string) { mb.metricMongodbatlasDiskPartitionLatencyAverage.recordDataPoint(mb.startTime, ts, val, diskDirectionAttributeValue) } // RecordMongodbatlasDiskPartitionLatencyMaxDataPoint adds a data point to mongodbatlas.disk.partition.latency.max metric. -func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionLatencyMaxDataPoint(ts pdata.Timestamp, val float64, diskDirectionAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionLatencyMaxDataPoint(ts pcommon.Timestamp, val float64, diskDirectionAttributeValue string) { mb.metricMongodbatlasDiskPartitionLatencyMax.recordDataPoint(mb.startTime, ts, val, diskDirectionAttributeValue) } // RecordMongodbatlasDiskPartitionSpaceAverageDataPoint adds a data point to mongodbatlas.disk.partition.space.average metric. -func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionSpaceAverageDataPoint(ts pdata.Timestamp, val float64, diskStatusAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionSpaceAverageDataPoint(ts pcommon.Timestamp, val float64, diskStatusAttributeValue string) { mb.metricMongodbatlasDiskPartitionSpaceAverage.recordDataPoint(mb.startTime, ts, val, diskStatusAttributeValue) } // RecordMongodbatlasDiskPartitionSpaceMaxDataPoint adds a data point to mongodbatlas.disk.partition.space.max metric. -func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionSpaceMaxDataPoint(ts pdata.Timestamp, val float64, diskStatusAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionSpaceMaxDataPoint(ts pcommon.Timestamp, val float64, diskStatusAttributeValue string) { mb.metricMongodbatlasDiskPartitionSpaceMax.recordDataPoint(mb.startTime, ts, val, diskStatusAttributeValue) } // RecordMongodbatlasDiskPartitionUsageAverageDataPoint adds a data point to mongodbatlas.disk.partition.usage.average metric. 
-func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionUsageAverageDataPoint(ts pdata.Timestamp, val float64, diskStatusAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionUsageAverageDataPoint(ts pcommon.Timestamp, val float64, diskStatusAttributeValue string) { mb.metricMongodbatlasDiskPartitionUsageAverage.recordDataPoint(mb.startTime, ts, val, diskStatusAttributeValue) } // RecordMongodbatlasDiskPartitionUsageMaxDataPoint adds a data point to mongodbatlas.disk.partition.usage.max metric. -func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionUsageMaxDataPoint(ts pdata.Timestamp, val float64, diskStatusAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionUsageMaxDataPoint(ts pcommon.Timestamp, val float64, diskStatusAttributeValue string) { mb.metricMongodbatlasDiskPartitionUsageMax.recordDataPoint(mb.startTime, ts, val, diskStatusAttributeValue) } // RecordMongodbatlasDiskPartitionUtilizationAverageDataPoint adds a data point to mongodbatlas.disk.partition.utilization.average metric. -func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionUtilizationAverageDataPoint(ts pdata.Timestamp, val float64, diskStatusAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionUtilizationAverageDataPoint(ts pcommon.Timestamp, val float64, diskStatusAttributeValue string) { mb.metricMongodbatlasDiskPartitionUtilizationAverage.recordDataPoint(mb.startTime, ts, val, diskStatusAttributeValue) } // RecordMongodbatlasDiskPartitionUtilizationMaxDataPoint adds a data point to mongodbatlas.disk.partition.utilization.max metric. -func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionUtilizationMaxDataPoint(ts pdata.Timestamp, val float64, diskStatusAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbatlasDiskPartitionUtilizationMaxDataPoint(ts pcommon.Timestamp, val float64, diskStatusAttributeValue string) { mb.metricMongodbatlasDiskPartitionUtilizationMax.recordDataPoint(mb.startTime, ts, val, diskStatusAttributeValue) } // RecordMongodbatlasProcessAssertsDataPoint adds a data point to mongodbatlas.process.asserts metric. -func (mb *MetricsBuilder) RecordMongodbatlasProcessAssertsDataPoint(ts pdata.Timestamp, val float64, assertTypeAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbatlasProcessAssertsDataPoint(ts pcommon.Timestamp, val float64, assertTypeAttributeValue string) { mb.metricMongodbatlasProcessAsserts.recordDataPoint(mb.startTime, ts, val, assertTypeAttributeValue) } // RecordMongodbatlasProcessBackgroundFlushDataPoint adds a data point to mongodbatlas.process.background_flush metric. -func (mb *MetricsBuilder) RecordMongodbatlasProcessBackgroundFlushDataPoint(ts pdata.Timestamp, val float64) { +func (mb *MetricsBuilder) RecordMongodbatlasProcessBackgroundFlushDataPoint(ts pcommon.Timestamp, val float64) { mb.metricMongodbatlasProcessBackgroundFlush.recordDataPoint(mb.startTime, ts, val) } // RecordMongodbatlasProcessCacheIoDataPoint adds a data point to mongodbatlas.process.cache.io metric. 
-func (mb *MetricsBuilder) RecordMongodbatlasProcessCacheIoDataPoint(ts pdata.Timestamp, val float64, cacheDirectionAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbatlasProcessCacheIoDataPoint(ts pcommon.Timestamp, val float64, cacheDirectionAttributeValue string) { mb.metricMongodbatlasProcessCacheIo.recordDataPoint(mb.startTime, ts, val, cacheDirectionAttributeValue) } // RecordMongodbatlasProcessCacheSizeDataPoint adds a data point to mongodbatlas.process.cache.size metric. -func (mb *MetricsBuilder) RecordMongodbatlasProcessCacheSizeDataPoint(ts pdata.Timestamp, val float64, cacheStatusAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbatlasProcessCacheSizeDataPoint(ts pcommon.Timestamp, val float64, cacheStatusAttributeValue string) { mb.metricMongodbatlasProcessCacheSize.recordDataPoint(mb.startTime, ts, val, cacheStatusAttributeValue) } // RecordMongodbatlasProcessConnectionsDataPoint adds a data point to mongodbatlas.process.connections metric. -func (mb *MetricsBuilder) RecordMongodbatlasProcessConnectionsDataPoint(ts pdata.Timestamp, val float64) { +func (mb *MetricsBuilder) RecordMongodbatlasProcessConnectionsDataPoint(ts pcommon.Timestamp, val float64) { mb.metricMongodbatlasProcessConnections.recordDataPoint(mb.startTime, ts, val) } // RecordMongodbatlasProcessCPUChildrenNormalizedUsageAverageDataPoint adds a data point to mongodbatlas.process.cpu.children.normalized.usage.average metric. -func (mb *MetricsBuilder) RecordMongodbatlasProcessCPUChildrenNormalizedUsageAverageDataPoint(ts pdata.Timestamp, val float64, cpuStateAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbatlasProcessCPUChildrenNormalizedUsageAverageDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) { mb.metricMongodbatlasProcessCPUChildrenNormalizedUsageAverage.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue) } // RecordMongodbatlasProcessCPUChildrenNormalizedUsageMaxDataPoint adds a data point to mongodbatlas.process.cpu.children.normalized.usage.max metric. -func (mb *MetricsBuilder) RecordMongodbatlasProcessCPUChildrenNormalizedUsageMaxDataPoint(ts pdata.Timestamp, val float64, cpuStateAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbatlasProcessCPUChildrenNormalizedUsageMaxDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) { mb.metricMongodbatlasProcessCPUChildrenNormalizedUsageMax.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue) } // RecordMongodbatlasProcessCPUChildrenUsageAverageDataPoint adds a data point to mongodbatlas.process.cpu.children.usage.average metric. -func (mb *MetricsBuilder) RecordMongodbatlasProcessCPUChildrenUsageAverageDataPoint(ts pdata.Timestamp, val float64, cpuStateAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbatlasProcessCPUChildrenUsageAverageDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) { mb.metricMongodbatlasProcessCPUChildrenUsageAverage.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue) } // RecordMongodbatlasProcessCPUChildrenUsageMaxDataPoint adds a data point to mongodbatlas.process.cpu.children.usage.max metric. 
-func (mb *MetricsBuilder) RecordMongodbatlasProcessCPUChildrenUsageMaxDataPoint(ts pdata.Timestamp, val float64, cpuStateAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbatlasProcessCPUChildrenUsageMaxDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) { mb.metricMongodbatlasProcessCPUChildrenUsageMax.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue) } // RecordMongodbatlasProcessCPUNormalizedUsageAverageDataPoint adds a data point to mongodbatlas.process.cpu.normalized.usage.average metric. -func (mb *MetricsBuilder) RecordMongodbatlasProcessCPUNormalizedUsageAverageDataPoint(ts pdata.Timestamp, val float64, cpuStateAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbatlasProcessCPUNormalizedUsageAverageDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) { mb.metricMongodbatlasProcessCPUNormalizedUsageAverage.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue) } // RecordMongodbatlasProcessCPUNormalizedUsageMaxDataPoint adds a data point to mongodbatlas.process.cpu.normalized.usage.max metric. -func (mb *MetricsBuilder) RecordMongodbatlasProcessCPUNormalizedUsageMaxDataPoint(ts pdata.Timestamp, val float64, cpuStateAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbatlasProcessCPUNormalizedUsageMaxDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) { mb.metricMongodbatlasProcessCPUNormalizedUsageMax.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue) } // RecordMongodbatlasProcessCPUUsageAverageDataPoint adds a data point to mongodbatlas.process.cpu.usage.average metric. -func (mb *MetricsBuilder) RecordMongodbatlasProcessCPUUsageAverageDataPoint(ts pdata.Timestamp, val float64, cpuStateAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbatlasProcessCPUUsageAverageDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) { mb.metricMongodbatlasProcessCPUUsageAverage.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue) } // RecordMongodbatlasProcessCPUUsageMaxDataPoint adds a data point to mongodbatlas.process.cpu.usage.max metric. -func (mb *MetricsBuilder) RecordMongodbatlasProcessCPUUsageMaxDataPoint(ts pdata.Timestamp, val float64, cpuStateAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbatlasProcessCPUUsageMaxDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) { mb.metricMongodbatlasProcessCPUUsageMax.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue) } // RecordMongodbatlasProcessCursorsDataPoint adds a data point to mongodbatlas.process.cursors metric. -func (mb *MetricsBuilder) RecordMongodbatlasProcessCursorsDataPoint(ts pdata.Timestamp, val float64, cursorStateAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbatlasProcessCursorsDataPoint(ts pcommon.Timestamp, val float64, cursorStateAttributeValue string) { mb.metricMongodbatlasProcessCursors.recordDataPoint(mb.startTime, ts, val, cursorStateAttributeValue) } // RecordMongodbatlasProcessDbDocumentRateDataPoint adds a data point to mongodbatlas.process.db.document.rate metric. 
-func (mb *MetricsBuilder) RecordMongodbatlasProcessDbDocumentRateDataPoint(ts pdata.Timestamp, val float64, documentStatusAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbatlasProcessDbDocumentRateDataPoint(ts pcommon.Timestamp, val float64, documentStatusAttributeValue string) { mb.metricMongodbatlasProcessDbDocumentRate.recordDataPoint(mb.startTime, ts, val, documentStatusAttributeValue) } // RecordMongodbatlasProcessDbOperationsRateDataPoint adds a data point to mongodbatlas.process.db.operations.rate metric. -func (mb *MetricsBuilder) RecordMongodbatlasProcessDbOperationsRateDataPoint(ts pdata.Timestamp, val float64, operationAttributeValue string, clusterRoleAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbatlasProcessDbOperationsRateDataPoint(ts pcommon.Timestamp, val float64, operationAttributeValue string, clusterRoleAttributeValue string) { mb.metricMongodbatlasProcessDbOperationsRate.recordDataPoint(mb.startTime, ts, val, operationAttributeValue, clusterRoleAttributeValue) } // RecordMongodbatlasProcessDbOperationsTimeDataPoint adds a data point to mongodbatlas.process.db.operations.time metric. -func (mb *MetricsBuilder) RecordMongodbatlasProcessDbOperationsTimeDataPoint(ts pdata.Timestamp, val float64, executionTypeAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbatlasProcessDbOperationsTimeDataPoint(ts pcommon.Timestamp, val float64, executionTypeAttributeValue string) { mb.metricMongodbatlasProcessDbOperationsTime.recordDataPoint(mb.startTime, ts, val, executionTypeAttributeValue) } // RecordMongodbatlasProcessDbQueryExecutorScannedDataPoint adds a data point to mongodbatlas.process.db.query_executor.scanned metric. -func (mb *MetricsBuilder) RecordMongodbatlasProcessDbQueryExecutorScannedDataPoint(ts pdata.Timestamp, val float64, scannedTypeAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbatlasProcessDbQueryExecutorScannedDataPoint(ts pcommon.Timestamp, val float64, scannedTypeAttributeValue string) { mb.metricMongodbatlasProcessDbQueryExecutorScanned.recordDataPoint(mb.startTime, ts, val, scannedTypeAttributeValue) } // RecordMongodbatlasProcessDbQueryTargetingScannedPerReturnedDataPoint adds a data point to mongodbatlas.process.db.query_targeting.scanned_per_returned metric. -func (mb *MetricsBuilder) RecordMongodbatlasProcessDbQueryTargetingScannedPerReturnedDataPoint(ts pdata.Timestamp, val float64, scannedTypeAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbatlasProcessDbQueryTargetingScannedPerReturnedDataPoint(ts pcommon.Timestamp, val float64, scannedTypeAttributeValue string) { mb.metricMongodbatlasProcessDbQueryTargetingScannedPerReturned.recordDataPoint(mb.startTime, ts, val, scannedTypeAttributeValue) } // RecordMongodbatlasProcessDbStorageDataPoint adds a data point to mongodbatlas.process.db.storage metric. -func (mb *MetricsBuilder) RecordMongodbatlasProcessDbStorageDataPoint(ts pdata.Timestamp, val float64, storageStatusAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbatlasProcessDbStorageDataPoint(ts pcommon.Timestamp, val float64, storageStatusAttributeValue string) { mb.metricMongodbatlasProcessDbStorage.recordDataPoint(mb.startTime, ts, val, storageStatusAttributeValue) } // RecordMongodbatlasProcessFtsCPUUsageDataPoint adds a data point to mongodbatlas.process.fts.cpu.usage metric. 
-func (mb *MetricsBuilder) RecordMongodbatlasProcessFtsCPUUsageDataPoint(ts pdata.Timestamp, val float64, cpuStateAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbatlasProcessFtsCPUUsageDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) { mb.metricMongodbatlasProcessFtsCPUUsage.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue) } // RecordMongodbatlasProcessGlobalLockDataPoint adds a data point to mongodbatlas.process.global_lock metric. -func (mb *MetricsBuilder) RecordMongodbatlasProcessGlobalLockDataPoint(ts pdata.Timestamp, val float64, globalLockStateAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbatlasProcessGlobalLockDataPoint(ts pcommon.Timestamp, val float64, globalLockStateAttributeValue string) { mb.metricMongodbatlasProcessGlobalLock.recordDataPoint(mb.startTime, ts, val, globalLockStateAttributeValue) } // RecordMongodbatlasProcessIndexBtreeMissRatioDataPoint adds a data point to mongodbatlas.process.index.btree_miss_ratio metric. -func (mb *MetricsBuilder) RecordMongodbatlasProcessIndexBtreeMissRatioDataPoint(ts pdata.Timestamp, val float64) { +func (mb *MetricsBuilder) RecordMongodbatlasProcessIndexBtreeMissRatioDataPoint(ts pcommon.Timestamp, val float64) { mb.metricMongodbatlasProcessIndexBtreeMissRatio.recordDataPoint(mb.startTime, ts, val) } // RecordMongodbatlasProcessIndexCountersDataPoint adds a data point to mongodbatlas.process.index.counters metric. -func (mb *MetricsBuilder) RecordMongodbatlasProcessIndexCountersDataPoint(ts pdata.Timestamp, val float64, btreeCounterTypeAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbatlasProcessIndexCountersDataPoint(ts pcommon.Timestamp, val float64, btreeCounterTypeAttributeValue string) { mb.metricMongodbatlasProcessIndexCounters.recordDataPoint(mb.startTime, ts, val, btreeCounterTypeAttributeValue) } // RecordMongodbatlasProcessJournalingCommitsDataPoint adds a data point to mongodbatlas.process.journaling.commits metric. -func (mb *MetricsBuilder) RecordMongodbatlasProcessJournalingCommitsDataPoint(ts pdata.Timestamp, val float64) { +func (mb *MetricsBuilder) RecordMongodbatlasProcessJournalingCommitsDataPoint(ts pcommon.Timestamp, val float64) { mb.metricMongodbatlasProcessJournalingCommits.recordDataPoint(mb.startTime, ts, val) } // RecordMongodbatlasProcessJournalingDataFilesDataPoint adds a data point to mongodbatlas.process.journaling.data_files metric. -func (mb *MetricsBuilder) RecordMongodbatlasProcessJournalingDataFilesDataPoint(ts pdata.Timestamp, val float64) { +func (mb *MetricsBuilder) RecordMongodbatlasProcessJournalingDataFilesDataPoint(ts pcommon.Timestamp, val float64) { mb.metricMongodbatlasProcessJournalingDataFiles.recordDataPoint(mb.startTime, ts, val) } // RecordMongodbatlasProcessJournalingWrittenDataPoint adds a data point to mongodbatlas.process.journaling.written metric. -func (mb *MetricsBuilder) RecordMongodbatlasProcessJournalingWrittenDataPoint(ts pdata.Timestamp, val float64) { +func (mb *MetricsBuilder) RecordMongodbatlasProcessJournalingWrittenDataPoint(ts pcommon.Timestamp, val float64) { mb.metricMongodbatlasProcessJournalingWritten.recordDataPoint(mb.startTime, ts, val) } // RecordMongodbatlasProcessMemoryUsageDataPoint adds a data point to mongodbatlas.process.memory.usage metric. 
-func (mb *MetricsBuilder) RecordMongodbatlasProcessMemoryUsageDataPoint(ts pdata.Timestamp, val float64, memoryStateAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbatlasProcessMemoryUsageDataPoint(ts pcommon.Timestamp, val float64, memoryStateAttributeValue string) { mb.metricMongodbatlasProcessMemoryUsage.recordDataPoint(mb.startTime, ts, val, memoryStateAttributeValue) } // RecordMongodbatlasProcessNetworkIoDataPoint adds a data point to mongodbatlas.process.network.io metric. -func (mb *MetricsBuilder) RecordMongodbatlasProcessNetworkIoDataPoint(ts pdata.Timestamp, val float64, directionAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbatlasProcessNetworkIoDataPoint(ts pcommon.Timestamp, val float64, directionAttributeValue string) { mb.metricMongodbatlasProcessNetworkIo.recordDataPoint(mb.startTime, ts, val, directionAttributeValue) } // RecordMongodbatlasProcessNetworkRequestsDataPoint adds a data point to mongodbatlas.process.network.requests metric. -func (mb *MetricsBuilder) RecordMongodbatlasProcessNetworkRequestsDataPoint(ts pdata.Timestamp, val float64) { +func (mb *MetricsBuilder) RecordMongodbatlasProcessNetworkRequestsDataPoint(ts pcommon.Timestamp, val float64) { mb.metricMongodbatlasProcessNetworkRequests.recordDataPoint(mb.startTime, ts, val) } // RecordMongodbatlasProcessOplogRateDataPoint adds a data point to mongodbatlas.process.oplog.rate metric. -func (mb *MetricsBuilder) RecordMongodbatlasProcessOplogRateDataPoint(ts pdata.Timestamp, val float64) { +func (mb *MetricsBuilder) RecordMongodbatlasProcessOplogRateDataPoint(ts pcommon.Timestamp, val float64) { mb.metricMongodbatlasProcessOplogRate.recordDataPoint(mb.startTime, ts, val) } // RecordMongodbatlasProcessOplogTimeDataPoint adds a data point to mongodbatlas.process.oplog.time metric. -func (mb *MetricsBuilder) RecordMongodbatlasProcessOplogTimeDataPoint(ts pdata.Timestamp, val float64, oplogTypeAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbatlasProcessOplogTimeDataPoint(ts pcommon.Timestamp, val float64, oplogTypeAttributeValue string) { mb.metricMongodbatlasProcessOplogTime.recordDataPoint(mb.startTime, ts, val, oplogTypeAttributeValue) } // RecordMongodbatlasProcessPageFaultsDataPoint adds a data point to mongodbatlas.process.page_faults metric. -func (mb *MetricsBuilder) RecordMongodbatlasProcessPageFaultsDataPoint(ts pdata.Timestamp, val float64, memoryIssueTypeAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbatlasProcessPageFaultsDataPoint(ts pcommon.Timestamp, val float64, memoryIssueTypeAttributeValue string) { mb.metricMongodbatlasProcessPageFaults.recordDataPoint(mb.startTime, ts, val, memoryIssueTypeAttributeValue) } // RecordMongodbatlasProcessRestartsDataPoint adds a data point to mongodbatlas.process.restarts metric. -func (mb *MetricsBuilder) RecordMongodbatlasProcessRestartsDataPoint(ts pdata.Timestamp, val float64) { +func (mb *MetricsBuilder) RecordMongodbatlasProcessRestartsDataPoint(ts pcommon.Timestamp, val float64) { mb.metricMongodbatlasProcessRestarts.recordDataPoint(mb.startTime, ts, val) } // RecordMongodbatlasProcessTicketsDataPoint adds a data point to mongodbatlas.process.tickets metric. 
-func (mb *MetricsBuilder) RecordMongodbatlasProcessTicketsDataPoint(ts pdata.Timestamp, val float64, ticketTypeAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbatlasProcessTicketsDataPoint(ts pcommon.Timestamp, val float64, ticketTypeAttributeValue string) { mb.metricMongodbatlasProcessTickets.recordDataPoint(mb.startTime, ts, val, ticketTypeAttributeValue) } // RecordMongodbatlasSystemCPUNormalizedUsageAverageDataPoint adds a data point to mongodbatlas.system.cpu.normalized.usage.average metric. -func (mb *MetricsBuilder) RecordMongodbatlasSystemCPUNormalizedUsageAverageDataPoint(ts pdata.Timestamp, val float64, cpuStateAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbatlasSystemCPUNormalizedUsageAverageDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) { mb.metricMongodbatlasSystemCPUNormalizedUsageAverage.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue) } // RecordMongodbatlasSystemCPUNormalizedUsageMaxDataPoint adds a data point to mongodbatlas.system.cpu.normalized.usage.max metric. -func (mb *MetricsBuilder) RecordMongodbatlasSystemCPUNormalizedUsageMaxDataPoint(ts pdata.Timestamp, val float64, cpuStateAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbatlasSystemCPUNormalizedUsageMaxDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) { mb.metricMongodbatlasSystemCPUNormalizedUsageMax.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue) } // RecordMongodbatlasSystemCPUUsageAverageDataPoint adds a data point to mongodbatlas.system.cpu.usage.average metric. -func (mb *MetricsBuilder) RecordMongodbatlasSystemCPUUsageAverageDataPoint(ts pdata.Timestamp, val float64, cpuStateAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbatlasSystemCPUUsageAverageDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) { mb.metricMongodbatlasSystemCPUUsageAverage.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue) } // RecordMongodbatlasSystemCPUUsageMaxDataPoint adds a data point to mongodbatlas.system.cpu.usage.max metric. -func (mb *MetricsBuilder) RecordMongodbatlasSystemCPUUsageMaxDataPoint(ts pdata.Timestamp, val float64, cpuStateAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbatlasSystemCPUUsageMaxDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) { mb.metricMongodbatlasSystemCPUUsageMax.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue) } // RecordMongodbatlasSystemFtsCPUNormalizedUsageDataPoint adds a data point to mongodbatlas.system.fts.cpu.normalized.usage metric. -func (mb *MetricsBuilder) RecordMongodbatlasSystemFtsCPUNormalizedUsageDataPoint(ts pdata.Timestamp, val float64, cpuStateAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbatlasSystemFtsCPUNormalizedUsageDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) { mb.metricMongodbatlasSystemFtsCPUNormalizedUsage.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue) } // RecordMongodbatlasSystemFtsCPUUsageDataPoint adds a data point to mongodbatlas.system.fts.cpu.usage metric. 
-func (mb *MetricsBuilder) RecordMongodbatlasSystemFtsCPUUsageDataPoint(ts pdata.Timestamp, val float64, cpuStateAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbatlasSystemFtsCPUUsageDataPoint(ts pcommon.Timestamp, val float64, cpuStateAttributeValue string) { mb.metricMongodbatlasSystemFtsCPUUsage.recordDataPoint(mb.startTime, ts, val, cpuStateAttributeValue) } // RecordMongodbatlasSystemFtsDiskUsedDataPoint adds a data point to mongodbatlas.system.fts.disk.used metric. -func (mb *MetricsBuilder) RecordMongodbatlasSystemFtsDiskUsedDataPoint(ts pdata.Timestamp, val float64) { +func (mb *MetricsBuilder) RecordMongodbatlasSystemFtsDiskUsedDataPoint(ts pcommon.Timestamp, val float64) { mb.metricMongodbatlasSystemFtsDiskUsed.recordDataPoint(mb.startTime, ts, val) } // RecordMongodbatlasSystemFtsMemoryUsageDataPoint adds a data point to mongodbatlas.system.fts.memory.usage metric. -func (mb *MetricsBuilder) RecordMongodbatlasSystemFtsMemoryUsageDataPoint(ts pdata.Timestamp, val float64, memoryStateAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbatlasSystemFtsMemoryUsageDataPoint(ts pcommon.Timestamp, val float64, memoryStateAttributeValue string) { mb.metricMongodbatlasSystemFtsMemoryUsage.recordDataPoint(mb.startTime, ts, val, memoryStateAttributeValue) } // RecordMongodbatlasSystemMemoryUsageAverageDataPoint adds a data point to mongodbatlas.system.memory.usage.average metric. -func (mb *MetricsBuilder) RecordMongodbatlasSystemMemoryUsageAverageDataPoint(ts pdata.Timestamp, val float64, memoryStatusAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbatlasSystemMemoryUsageAverageDataPoint(ts pcommon.Timestamp, val float64, memoryStatusAttributeValue string) { mb.metricMongodbatlasSystemMemoryUsageAverage.recordDataPoint(mb.startTime, ts, val, memoryStatusAttributeValue) } // RecordMongodbatlasSystemMemoryUsageMaxDataPoint adds a data point to mongodbatlas.system.memory.usage.max metric. -func (mb *MetricsBuilder) RecordMongodbatlasSystemMemoryUsageMaxDataPoint(ts pdata.Timestamp, val float64, memoryStatusAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbatlasSystemMemoryUsageMaxDataPoint(ts pcommon.Timestamp, val float64, memoryStatusAttributeValue string) { mb.metricMongodbatlasSystemMemoryUsageMax.recordDataPoint(mb.startTime, ts, val, memoryStatusAttributeValue) } // RecordMongodbatlasSystemNetworkIoAverageDataPoint adds a data point to mongodbatlas.system.network.io.average metric. -func (mb *MetricsBuilder) RecordMongodbatlasSystemNetworkIoAverageDataPoint(ts pdata.Timestamp, val float64, directionAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbatlasSystemNetworkIoAverageDataPoint(ts pcommon.Timestamp, val float64, directionAttributeValue string) { mb.metricMongodbatlasSystemNetworkIoAverage.recordDataPoint(mb.startTime, ts, val, directionAttributeValue) } // RecordMongodbatlasSystemNetworkIoMaxDataPoint adds a data point to mongodbatlas.system.network.io.max metric. -func (mb *MetricsBuilder) RecordMongodbatlasSystemNetworkIoMaxDataPoint(ts pdata.Timestamp, val float64, directionAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbatlasSystemNetworkIoMaxDataPoint(ts pcommon.Timestamp, val float64, directionAttributeValue string) { mb.metricMongodbatlasSystemNetworkIoMax.recordDataPoint(mb.startTime, ts, val, directionAttributeValue) } // RecordMongodbatlasSystemPagingIoAverageDataPoint adds a data point to mongodbatlas.system.paging.io.average metric. 
-func (mb *MetricsBuilder) RecordMongodbatlasSystemPagingIoAverageDataPoint(ts pdata.Timestamp, val float64, directionAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbatlasSystemPagingIoAverageDataPoint(ts pcommon.Timestamp, val float64, directionAttributeValue string) { mb.metricMongodbatlasSystemPagingIoAverage.recordDataPoint(mb.startTime, ts, val, directionAttributeValue) } // RecordMongodbatlasSystemPagingIoMaxDataPoint adds a data point to mongodbatlas.system.paging.io.max metric. -func (mb *MetricsBuilder) RecordMongodbatlasSystemPagingIoMaxDataPoint(ts pdata.Timestamp, val float64, directionAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbatlasSystemPagingIoMaxDataPoint(ts pcommon.Timestamp, val float64, directionAttributeValue string) { mb.metricMongodbatlasSystemPagingIoMax.recordDataPoint(mb.startTime, ts, val, directionAttributeValue) } // RecordMongodbatlasSystemPagingUsageAverageDataPoint adds a data point to mongodbatlas.system.paging.usage.average metric. -func (mb *MetricsBuilder) RecordMongodbatlasSystemPagingUsageAverageDataPoint(ts pdata.Timestamp, val float64, directionAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbatlasSystemPagingUsageAverageDataPoint(ts pcommon.Timestamp, val float64, directionAttributeValue string) { mb.metricMongodbatlasSystemPagingUsageAverage.recordDataPoint(mb.startTime, ts, val, directionAttributeValue) } // RecordMongodbatlasSystemPagingUsageMaxDataPoint adds a data point to mongodbatlas.system.paging.usage.max metric. -func (mb *MetricsBuilder) RecordMongodbatlasSystemPagingUsageMaxDataPoint(ts pdata.Timestamp, val float64, directionAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbatlasSystemPagingUsageMaxDataPoint(ts pcommon.Timestamp, val float64, directionAttributeValue string) { mb.metricMongodbatlasSystemPagingUsageMax.recordDataPoint(mb.startTime, ts, val, directionAttributeValue) } // Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted, // and metrics builder should update its startTime and reset it's internal state accordingly. func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { - mb.startTime = pdata.NewTimestampFromTime(time.Now()) + mb.startTime = pcommon.NewTimestampFromTime(time.Now()) for _, op := range options { op(mb) } diff --git a/receiver/mongodbatlasreceiver/internal/metadata/metric_name_mapping.go b/receiver/mongodbatlasreceiver/internal/metadata/metric_name_mapping.go index 25f982703c5a..588d89a11139 100644 --- a/receiver/mongodbatlasreceiver/internal/metadata/metric_name_mapping.go +++ b/receiver/mongodbatlasreceiver/internal/metadata/metric_name_mapping.go @@ -18,11 +18,11 @@ import ( "time" "go.mongodb.org/atlas/mongodbatlas" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" ) // metricRecordFunc records the data point to the metric builder at the supplied timestamp -type metricRecordFunc func(*MetricsBuilder, *mongodbatlas.DataPoints, pdata.Timestamp) +type metricRecordFunc func(*MetricsBuilder, *mongodbatlas.DataPoints, pcommon.Timestamp) // getRecordFunc returns the metricRecordFunc that matches the metric name. Nil if none is found. func getRecordFunc(metricName string) metricRecordFunc { @@ -30,685 +30,685 @@ func getRecordFunc(metricName string) metricRecordFunc { // MongoDB CPU usage. For hosts with more than one CPU core, these values can exceed 100%. 
case "PROCESS_CPU_USER": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessCPUUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.User) } case "MAX_PROCESS_CPU_USER": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessCPUUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.User) } case "PROCESS_CPU_KERNEL": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessCPUUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.Kernel) } case "MAX_PROCESS_CPU_KERNEL": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessCPUUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.Kernel) } case "PROCESS_CPU_CHILDREN_USER": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessCPUChildrenUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.User) } case "MAX_PROCESS_CPU_CHILDREN_USER": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessCPUChildrenUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.User) } case "PROCESS_CPU_CHILDREN_KERNEL": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessCPUChildrenUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.Kernel) } case "MAX_PROCESS_CPU_CHILDREN_KERNEL": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessCPUChildrenUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.Kernel) } // MongoDB CPU usage scaled to a range of 0% to 100%. Atlas computes this value by dividing by the number of CPU cores. 
case "PROCESS_NORMALIZED_CPU_USER": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessCPUNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.User) } case "MAX_PROCESS_NORMALIZED_CPU_USER": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessCPUNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.User) } case "PROCESS_NORMALIZED_CPU_KERNEL": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessCPUNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.Kernel) } case "MAX_PROCESS_NORMALIZED_CPU_KERNEL": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessCPUNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.Kernel) } case "PROCESS_NORMALIZED_CPU_CHILDREN_USER": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessCPUChildrenNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.User) } // Context: Process case "MAX_PROCESS_NORMALIZED_CPU_CHILDREN_USER": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessCPUChildrenNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.User) } case "PROCESS_NORMALIZED_CPU_CHILDREN_KERNEL": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessCPUChildrenNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.Kernel) } case "MAX_PROCESS_NORMALIZED_CPU_CHILDREN_KERNEL": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessCPUChildrenNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.Kernel) } // Rate of asserts for a MongoDB process found in the asserts document that the serverStatus command generates. 
case "ASSERT_REGULAR": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessAssertsDataPoint(ts, float64(*dp.Value), AttributeAssertType.Regular) } case "ASSERT_WARNING": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessAssertsDataPoint(ts, float64(*dp.Value), AttributeAssertType.Warning) } case "ASSERT_MSG": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessAssertsDataPoint(ts, float64(*dp.Value), AttributeAssertType.Msg) } case "ASSERT_USER": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessAssertsDataPoint(ts, float64(*dp.Value), AttributeAssertType.User) } // Amount of data flushed in the background. case "BACKGROUND_FLUSH_AVG": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessBackgroundFlushDataPoint(ts, float64(*dp.Value)) } // Amount of bytes in the WiredTiger storage engine cache and tickets found in the wiredTiger.cache and wiredTiger.concurrentTransactions documents that the serverStatus command generates. case "CACHE_BYTES_READ_INTO": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessCacheIoDataPoint(ts, float64(*dp.Value), AttributeCacheDirection.ReadInto) } case "CACHE_BYTES_WRITTEN_FROM": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessCacheIoDataPoint(ts, float64(*dp.Value), AttributeCacheDirection.WrittenFrom) } case "CACHE_DIRTY_BYTES": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessCacheSizeDataPoint(ts, float64(*dp.Value), AttributeCacheStatus.Dirty) } case "CACHE_USED_BYTES": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessCacheSizeDataPoint(ts, float64(*dp.Value), AttributeCacheStatus.Used) } case "TICKETS_AVAILABLE_READS": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessTicketsDataPoint(ts, float64(*dp.Value), AttributeTicketType.AvailableReads) } case "TICKETS_AVAILABLE_WRITE": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessTicketsDataPoint(ts, float64(*dp.Value), AttributeTicketType.AvailableWrites) } // Number of connections to a 
MongoDB process found in the connections document that the serverStatus command generates. case "CONNECTIONS": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessConnectionsDataPoint(ts, float64(*dp.Value)) } // Number of cursors for a MongoDB process found in the metrics.cursor document that the serverStatus command generates. case "CURSORS_TOTAL_OPEN": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessCursorsDataPoint(ts, float64(*dp.Value), AttributeCursorState.Open) } case "CURSORS_TOTAL_TIMED_OUT": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessCursorsDataPoint(ts, float64(*dp.Value), AttributeCursorState.TimedOut) } // Numbers of Memory Issues and Page Faults for a MongoDB process. case "EXTRA_INFO_PAGE_FAULTS": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessPageFaultsDataPoint(ts, float64(*dp.Value), AttributeMemoryIssueType.ExtraInfo) } case "GLOBAL_ACCESSES_NOT_IN_MEMORY": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessPageFaultsDataPoint(ts, float64(*dp.Value), AttributeMemoryIssueType.GlobalAccessesNotInMemory) } case "GLOBAL_PAGE_FAULT_EXCEPTIONS_THROWN": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessPageFaultsDataPoint(ts, float64(*dp.Value), AttributeMemoryIssueType.ExceptionsThrown) } // Number of operations waiting on locks for the MongoDB process that the serverStatus command generates. Cloud Manager computes these values based on the type of storage engine. case "GLOBAL_LOCK_CURRENT_QUEUE_TOTAL": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessGlobalLockDataPoint(ts, float64(*dp.Value), AttributeGlobalLockState.CurrentQueueTotal) } case "GLOBAL_LOCK_CURRENT_QUEUE_READERS": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessGlobalLockDataPoint(ts, float64(*dp.Value), AttributeGlobalLockState.CurrentQueueReaders) } case "GLOBAL_LOCK_CURRENT_QUEUE_WRITERS": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessGlobalLockDataPoint(ts, float64(*dp.Value), AttributeGlobalLockState.CurrentQueueWriters) } // Number of index btree operations. 
case "INDEX_COUNTERS_BTREE_ACCESSES": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessIndexCountersDataPoint(ts, float64(*dp.Value), AttributeBtreeCounterType.Accesses) } case "INDEX_COUNTERS_BTREE_HITS": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessIndexCountersDataPoint(ts, float64(*dp.Value), AttributeBtreeCounterType.Hits) } case "INDEX_COUNTERS_BTREE_MISSES": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessIndexCountersDataPoint(ts, float64(*dp.Value), AttributeBtreeCounterType.Misses) } case "INDEX_COUNTERS_BTREE_MISS_RATIO": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessIndexBtreeMissRatioDataPoint(ts, float64(*dp.Value)) } // Number of journaling operations. case "JOURNALING_COMMITS_IN_WRITE_LOCK": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessJournalingCommitsDataPoint(ts, float64(*dp.Value)) } case "JOURNALING_MB": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessJournalingWrittenDataPoint(ts, float64(*dp.Value)) } case "JOURNALING_WRITE_DATA_FILES_MB": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessJournalingDataFilesDataPoint(ts, float64(*dp.Value)) } // Amount of memory for a MongoDB process found in the mem document that the serverStatus command collects. 
case "MEMORY_RESIDENT": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessMemoryUsageDataPoint(ts, float64(*dp.Value), AttributeMemoryState.Resident) } case "MEMORY_VIRTUAL": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessMemoryUsageDataPoint(ts, float64(*dp.Value), AttributeMemoryState.Virtual) } case "MEMORY_MAPPED": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessMemoryUsageDataPoint(ts, float64(*dp.Value), AttributeMemoryState.Mapped) } case "COMPUTED_MEMORY": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessMemoryUsageDataPoint(ts, float64(*dp.Value), AttributeMemoryState.Computed) } // Amount of throughput for MongoDB process found in the network document that the serverStatus command collects. case "NETWORK_BYTES_IN": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessNetworkIoDataPoint(ts, float64(*dp.Value), AttributeDirection.Receive) } case "NETWORK_BYTES_OUT": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessNetworkIoDataPoint(ts, float64(*dp.Value), AttributeDirection.Transmit) } case "NETWORK_NUM_REQUESTS": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessNetworkRequestsDataPoint(ts, float64(*dp.Value)) } // Durations and throughput of the MongoDB process' oplog. case "OPLOG_SLAVE_LAG_MASTER_TIME": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessOplogTimeDataPoint(ts, float64(*dp.Value), AttributeOplogType.SlaveLagMasterTime) } case "OPLOG_MASTER_TIME": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessOplogTimeDataPoint(ts, float64(*dp.Value), AttributeOplogType.MasterTime) } case "OPLOG_MASTER_LAG_TIME_DIFF": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessOplogTimeDataPoint(ts, float64(*dp.Value), AttributeOplogType.MasterLagTimeDiff) } case "OPLOG_RATE_GB_PER_HOUR": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessOplogRateDataPoint(ts, float64(*dp.Value)) } // Number of database operations on a MongoDB process since the process last started. 
case "DB_STORAGE_TOTAL": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessDbStorageDataPoint(ts, float64(*dp.Value), AttributeStorageStatus.Total) } case "DB_DATA_SIZE_TOTAL": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessDbStorageDataPoint(ts, float64(*dp.Value), AttributeStorageStatus.DataSize) } case "DB_INDEX_SIZE_TOTAL": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessDbStorageDataPoint(ts, float64(*dp.Value), AttributeStorageStatus.IndexSize) } case "DB_DATA_SIZE_TOTAL_WO_SYSTEM": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessDbStorageDataPoint(ts, float64(*dp.Value), AttributeStorageStatus.DataSizeWoSystem) } // Rate of database operations on a MongoDB process since the process last started found in the opcounters document that the serverStatus command collects. case "OPCOUNTER_CMD": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperation.Cmd, AttributeClusterRole.Primary) } case "OPCOUNTER_QUERY": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperation.Query, AttributeClusterRole.Primary) } case "OPCOUNTER_UPDATE": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperation.Update, AttributeClusterRole.Primary) } case "OPCOUNTER_DELETE": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperation.Delete, AttributeClusterRole.Primary) } case "OPCOUNTER_GETMORE": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperation.Getmore, AttributeClusterRole.Primary) } case "OPCOUNTER_INSERT": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperation.Insert, AttributeClusterRole.Primary) } // Rate of database operations on MongoDB secondaries found in the opcountersRepl document that the serverStatus command collects. 
case "OPCOUNTER_REPL_CMD": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperation.Cmd, AttributeClusterRole.Replica) } case "OPCOUNTER_REPL_UPDATE": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperation.Update, AttributeClusterRole.Replica) } case "OPCOUNTER_REPL_DELETE": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperation.Delete, AttributeClusterRole.Replica) } case "OPCOUNTER_REPL_INSERT": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperation.Insert, AttributeClusterRole.Replica) } // Average rate of documents returned, inserted, updated, or deleted per second during a selected time period. case "DOCUMENT_METRICS_RETURNED": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessDbDocumentRateDataPoint(ts, float64(*dp.Value), AttributeDocumentStatus.Returned) } case "DOCUMENT_METRICS_INSERTED": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessDbDocumentRateDataPoint(ts, float64(*dp.Value), AttributeDocumentStatus.Inserted) } case "DOCUMENT_METRICS_UPDATED": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessDbDocumentRateDataPoint(ts, float64(*dp.Value), AttributeDocumentStatus.Updated) } case "DOCUMENT_METRICS_DELETED": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessDbDocumentRateDataPoint(ts, float64(*dp.Value), AttributeDocumentStatus.Deleted) } // Average rate for operations per second during a selected time period that perform a sort but cannot perform the sort using an index. case "OPERATIONS_SCAN_AND_ORDER": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessDbOperationsRateDataPoint(ts, float64(*dp.Value), AttributeOperation.ScanAndOrder, AttributeClusterRole.Primary) } // Average execution time in milliseconds per read, write, or command operation during a selected time period. 
case "OP_EXECUTION_TIME_READS": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessDbOperationsTimeDataPoint(ts, float64(*dp.Value), AttributeExecutionType.Reads) } case "OP_EXECUTION_TIME_WRITES": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessDbOperationsTimeDataPoint(ts, float64(*dp.Value), AttributeExecutionType.Writes) } case "OP_EXECUTION_TIME_COMMANDS": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessDbOperationsTimeDataPoint(ts, float64(*dp.Value), AttributeExecutionType.Commands) } // Number of times the host restarted within the previous hour. case "RESTARTS_IN_LAST_HOUR": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessRestartsDataPoint(ts, float64(*dp.Value)) } // Average rate per second to scan index items during queries and query-plan evaluations found in the value of totalKeysExamined from the explain command. case "QUERY_EXECUTOR_SCANNED": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessDbQueryExecutorScannedDataPoint(ts, float64(*dp.Value), AttributeScannedType.IndexItems) } // Average rate of documents scanned per second during queries and query-plan evaluations found in the value of totalDocsExamined from the explain command. case "QUERY_EXECUTOR_SCANNED_OBJECTS": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessDbQueryExecutorScannedDataPoint(ts, float64(*dp.Value), AttributeScannedType.Objects) } // Ratio of the number of index items scanned to the number of documents returned. case "QUERY_TARGETING_SCANNED_PER_RETURNED": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessDbQueryTargetingScannedPerReturnedDataPoint(ts, float64(*dp.Value), AttributeScannedType.IndexItems) } // Ratio of the number of documents scanned to the number of documents returned. case "QUERY_TARGETING_SCANNED_OBJECTS_PER_RETURNED": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasProcessDbQueryTargetingScannedPerReturnedDataPoint(ts, float64(*dp.Value), AttributeScannedType.Objects) } // CPU usage of processes on the host. For hosts with more than one CPU core, this value can exceed 100%. 
case "SYSTEM_CPU_USER": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemCPUUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.User) } case "MAX_SYSTEM_CPU_USER": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemCPUUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.User) } case "SYSTEM_CPU_KERNEL": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemCPUUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.Kernel) } case "MAX_SYSTEM_CPU_KERNEL": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemCPUUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.Kernel) } case "SYSTEM_CPU_NICE": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemCPUUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.Nice) } case "MAX_SYSTEM_CPU_NICE": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemCPUUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.Nice) } case "SYSTEM_CPU_IOWAIT": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemCPUUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.Iowait) } case "MAX_SYSTEM_CPU_IOWAIT": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemCPUUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.Iowait) } case "SYSTEM_CPU_IRQ": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemCPUUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.Irq) } case "MAX_SYSTEM_CPU_IRQ": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemCPUUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.Irq) } case "SYSTEM_CPU_SOFTIRQ": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemCPUUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.Softirq) } case "MAX_SYSTEM_CPU_SOFTIRQ": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemCPUUsageMaxDataPoint(ts, 
float64(*dp.Value), AttributeCPUState.Softirq) } case "SYSTEM_CPU_GUEST": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemCPUUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.Guest) } case "MAX_SYSTEM_CPU_GUEST": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemCPUUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.Guest) } case "SYSTEM_CPU_STEAL": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemCPUUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.Steal) } case "MAX_SYSTEM_CPU_STEAL": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemCPUUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.Steal) } // CPU usage of processes on the host scaled to a range of 0 to 100% by dividing by the number of CPU cores. case "SYSTEM_NORMALIZED_CPU_USER": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemCPUNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.User) } case "MAX_SYSTEM_NORMALIZED_CPU_USER": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemCPUNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.User) } case "MAX_SYSTEM_NORMALIZED_CPU_NICE": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemCPUNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.Nice) } case "SYSTEM_NORMALIZED_CPU_KERNEL": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemCPUNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.Kernel) } case "MAX_SYSTEM_NORMALIZED_CPU_KERNEL": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemCPUNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.Kernel) } case "SYSTEM_NORMALIZED_CPU_NICE": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemCPUNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.Nice) } case "SYSTEM_NORMALIZED_CPU_IOWAIT": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemCPUNormalizedUsageAverageDataPoint(ts, 
float64(*dp.Value), AttributeCPUState.Iowait) } case "MAX_SYSTEM_NORMALIZED_CPU_IOWAIT": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemCPUNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.Iowait) } case "SYSTEM_NORMALIZED_CPU_IRQ": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemCPUNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.Irq) } case "MAX_SYSTEM_NORMALIZED_CPU_IRQ": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemCPUNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.Irq) } case "SYSTEM_NORMALIZED_CPU_SOFTIRQ": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemCPUNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.Softirq) } case "MAX_SYSTEM_NORMALIZED_CPU_SOFTIRQ": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemCPUNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.Softirq) } case "SYSTEM_NORMALIZED_CPU_GUEST": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemCPUNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.Guest) } case "MAX_SYSTEM_NORMALIZED_CPU_GUEST": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemCPUNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.Guest) } case "SYSTEM_NORMALIZED_CPU_STEAL": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemCPUNormalizedUsageAverageDataPoint(ts, float64(*dp.Value), AttributeCPUState.Steal) } case "MAX_SYSTEM_NORMALIZED_CPU_STEAL": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemCPUNormalizedUsageMaxDataPoint(ts, float64(*dp.Value), AttributeCPUState.Steal) } // Physical memory usage, in bytes, that the host uses. 
case "SYSTEM_MEMORY_AVAILABLE": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemMemoryUsageAverageDataPoint(ts, float64(*dp.Value), AttributeMemoryStatus.Available) } case "MAX_SYSTEM_MEMORY_AVAILABLE": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemMemoryUsageMaxDataPoint(ts, float64(*dp.Value), AttributeMemoryStatus.Available) } case "SYSTEM_MEMORY_BUFFERS": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemMemoryUsageAverageDataPoint(ts, float64(*dp.Value), AttributeMemoryStatus.Buffers) } case "MAX_SYSTEM_MEMORY_BUFFERS": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemMemoryUsageMaxDataPoint(ts, float64(*dp.Value), AttributeMemoryStatus.Buffers) } case "SYSTEM_MEMORY_CACHED": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemMemoryUsageAverageDataPoint(ts, float64(*dp.Value), AttributeMemoryStatus.Cached) } case "MAX_SYSTEM_MEMORY_CACHED": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemMemoryUsageMaxDataPoint(ts, float64(*dp.Value), AttributeMemoryStatus.Cached) } case "SYSTEM_MEMORY_FREE": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemMemoryUsageAverageDataPoint(ts, float64(*dp.Value), AttributeMemoryStatus.Free) } case "MAX_SYSTEM_MEMORY_FREE": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemMemoryUsageAverageDataPoint(ts, float64(*dp.Value), AttributeMemoryStatus.Free) } case "SYSTEM_MEMORY_SHARED": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemMemoryUsageAverageDataPoint(ts, float64(*dp.Value), AttributeMemoryStatus.Shared) } case "MAX_SYSTEM_MEMORY_SHARED": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemMemoryUsageMaxDataPoint(ts, float64(*dp.Value), AttributeMemoryStatus.Shared) } case "SYSTEM_MEMORY_USED": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemMemoryUsageAverageDataPoint(ts, float64(*dp.Value), AttributeMemoryStatus.Used) } case "MAX_SYSTEM_MEMORY_USED": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) 
{ + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemMemoryUsageMaxDataPoint(ts, float64(*dp.Value), AttributeMemoryStatus.Used) } // Average rate of physical bytes per second that the eth0 network interface received and transmitted. case "SYSTEM_NETWORK_IN": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemNetworkIoAverageDataPoint(ts, float64(*dp.Value), AttributeDirection.Receive) } case "MAX_SYSTEM_NETWORK_IN": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemNetworkIoMaxDataPoint(ts, float64(*dp.Value), AttributeDirection.Receive) } case "SYSTEM_NETWORK_OUT": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemNetworkIoAverageDataPoint(ts, float64(*dp.Value), AttributeDirection.Transmit) } case "MAX_SYSTEM_NETWORK_OUT": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemNetworkIoMaxDataPoint(ts, float64(*dp.Value), AttributeDirection.Transmit) } // Total amount of memory that swap uses. case "SWAP_USAGE_USED": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemPagingUsageAverageDataPoint(ts, float64(*dp.Value), AttributeMemoryState.Used) } case "MAX_SWAP_USAGE_USED": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemPagingUsageMaxDataPoint(ts, float64(*dp.Value), AttributeMemoryState.Used) } case "SWAP_USAGE_FREE": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemPagingUsageAverageDataPoint(ts, float64(*dp.Value), AttributeMemoryState.Free) } case "MAX_SWAP_USAGE_FREE": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemPagingUsageMaxDataPoint(ts, float64(*dp.Value), AttributeMemoryState.Free) } // Total amount of memory written and read from swap. 
case "SWAP_IO_IN": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemPagingIoAverageDataPoint(ts, float64(*dp.Value), AttributeDirection.Receive) } case "MAX_SWAP_IO_IN": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemPagingIoMaxDataPoint(ts, float64(*dp.Value), AttributeDirection.Receive) } case "SWAP_IO_OUT": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemPagingIoAverageDataPoint(ts, float64(*dp.Value), AttributeDirection.Transmit) } case "MAX_SWAP_IO_OUT": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemPagingIoMaxDataPoint(ts, float64(*dp.Value), AttributeDirection.Transmit) } // Memory usage, in bytes, that Atlas Search processes use. case "FTS_PROCESS_RESIDENT_MEMORY": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemFtsMemoryUsageDataPoint(ts, float64(*dp.Value), AttributeMemoryState.Resident) } case "FTS_PROCESS_VIRTUAL_MEMORY": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemFtsMemoryUsageDataPoint(ts, float64(*dp.Value), AttributeMemoryState.Virtual) } case "FTS_PROCESS_SHARED_MEMORY": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemFtsMemoryUsageDataPoint(ts, float64(*dp.Value), AttributeMemoryState.Shared) } case "FTS_MEMORY_MAPPED": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemFtsMemoryUsageDataPoint(ts, float64(*dp.Value), AttributeMemoryState.Mapped) } // Disk space, in bytes, that Atlas Search indexes use. case "FTS_DISK_USAGE": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemFtsDiskUsedDataPoint(ts, float64(*dp.Value)) } // Percentage of CPU that Atlas Search processes use. 
case "FTS_PROCESS_CPU_USER": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemFtsCPUUsageDataPoint(ts, float64(*dp.Value), AttributeCPUState.User) } case "FTS_PROCESS_CPU_KERNEL": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemFtsCPUUsageDataPoint(ts, float64(*dp.Value), AttributeCPUState.Kernel) } case "FTS_PROCESS_NORMALIZED_CPU_USER": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemFtsCPUNormalizedUsageDataPoint(ts, float64(*dp.Value), AttributeCPUState.User) } case "FTS_PROCESS_NORMALIZED_CPU_KERNEL": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasSystemFtsCPUNormalizedUsageDataPoint(ts, float64(*dp.Value), AttributeCPUState.Kernel) } @@ -716,130 +716,130 @@ func getRecordFunc(metricName string) metricRecordFunc { // Measures throughput of I/O operations for the disk partition used for MongoDB. case "DISK_PARTITION_IOPS_READ": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasDiskPartitionIopsAverageDataPoint(ts, float64(*dp.Value), AttributeDiskDirection.Read) } case "MAX_DISK_PARTITION_IOPS_READ": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasDiskPartitionIopsAverageDataPoint(ts, float64(*dp.Value), AttributeDiskDirection.Read) } case "DISK_PARTITION_IOPS_WRITE": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasDiskPartitionIopsAverageDataPoint(ts, float64(*dp.Value), AttributeDiskDirection.Write) } case "MAX_DISK_PARTITION_IOPS_WRITE": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasDiskPartitionIopsMaxDataPoint(ts, float64(*dp.Value), AttributeDiskDirection.Write) } case "DISK_PARTITION_IOPS_TOTAL": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasDiskPartitionIopsAverageDataPoint(ts, float64(*dp.Value), AttributeDiskDirection.Total) } case "MAX_DISK_PARTITION_IOPS_TOTAL": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasDiskPartitionIopsMaxDataPoint(ts, float64(*dp.Value), AttributeDiskDirection.Total) } // The percentage of time during which requests are being issued to and serviced by the partition. // This includes requests from any process, not just MongoDB processes. 
case "DISK_PARTITION_LATENCY_READ": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasDiskPartitionLatencyAverageDataPoint(ts, float64(*dp.Value), AttributeDiskDirection.Read) } case "MAX_DISK_PARTITION_LATENCY_READ": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasDiskPartitionLatencyMaxDataPoint(ts, float64(*dp.Value), AttributeDiskDirection.Read) } case "DISK_PARTITION_LATENCY_WRITE": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasDiskPartitionLatencyAverageDataPoint(ts, float64(*dp.Value), AttributeDiskDirection.Write) } case "MAX_DISK_PARTITION_LATENCY_WRITE": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasDiskPartitionLatencyMaxDataPoint(ts, float64(*dp.Value), AttributeDiskDirection.Write) } // Measures latency per operation type of the disk partition used by MongoDB. case "DISK_PARTITION_SPACE_FREE": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasDiskPartitionSpaceAverageDataPoint(ts, float64(*dp.Value), AttributeDiskStatus.Free) } case "MAX_DISK_PARTITION_SPACE_FREE": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasDiskPartitionSpaceMaxDataPoint(ts, float64(*dp.Value), AttributeDiskStatus.Free) } case "DISK_PARTITION_SPACE_USED": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasDiskPartitionSpaceAverageDataPoint(ts, float64(*dp.Value), AttributeDiskStatus.Used) } case "MAX_DISK_PARTITION_SPACE_USED": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasDiskPartitionSpaceMaxDataPoint(ts, float64(*dp.Value), AttributeDiskStatus.Used) } case "DISK_PARTITION_SPACE_PERCENT_FREE": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasDiskPartitionUtilizationAverageDataPoint(ts, float64(*dp.Value), AttributeDiskStatus.Free) } case "MAX_DISK_PARTITION_SPACE_PERCENT_FREE": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasDiskPartitionUtilizationMaxDataPoint(ts, float64(*dp.Value), AttributeDiskStatus.Free) } case "DISK_PARTITION_SPACE_PERCENT_USED": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { 
mb.RecordMongodbatlasDiskPartitionUtilizationAverageDataPoint(ts, float64(*dp.Value), AttributeDiskStatus.Used) } case "MAX_DISK_PARTITION_SPACE_PERCENT_USED": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasDiskPartitionUtilizationMaxDataPoint(ts, float64(*dp.Value), AttributeDiskStatus.Used) } // Process Database Measurements (https://docs.atlas.mongodb.com/reference/api/process-disks-measurements/) case "DATABASE_COLLECTION_COUNT": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasDbCountsDataPoint(ts, float64(*dp.Value), AttributeObjectType.Collection) } case "DATABASE_INDEX_COUNT": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasDbCountsDataPoint(ts, float64(*dp.Value), AttributeObjectType.Index) } case "DATABASE_EXTENT_COUNT": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasDbCountsDataPoint(ts, float64(*dp.Value), AttributeObjectType.Extent) } case "DATABASE_OBJECT_COUNT": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasDbCountsDataPoint(ts, float64(*dp.Value), AttributeObjectType.Object) } case "DATABASE_VIEW_COUNT": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasDbCountsDataPoint(ts, float64(*dp.Value), AttributeObjectType.View) } case "DATABASE_AVERAGE_OBJECT_SIZE": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasDbSizeDataPoint(ts, float64(*dp.Value), AttributeObjectType.Object) } case "DATABASE_STORAGE_SIZE": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasDbSizeDataPoint(ts, float64(*dp.Value), AttributeObjectType.Storage) } case "DATABASE_INDEX_SIZE": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasDbSizeDataPoint(ts, float64(*dp.Value), AttributeObjectType.Index) } case "DATABASE_DATA_SIZE": - return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pdata.Timestamp) { + return func(mb *MetricsBuilder, dp *mongodbatlas.DataPoints, ts pcommon.Timestamp) { mb.RecordMongodbatlasDbSizeDataPoint(ts, float64(*dp.Value), AttributeObjectType.Data) } @@ -864,7 +864,7 @@ func addDataPoint(mb *MetricsBuilder, meas *mongodbatlas.Measurements, recordFun if err != nil { return err } - recordFunc(mb, point, pdata.NewTimestampFromTime(curTime)) + recordFunc(mb, point, pcommon.NewTimestampFromTime(curTime)) } } return nil diff --git a/receiver/mongodbatlasreceiver/receiver.go 
b/receiver/mongodbatlasreceiver/receiver.go index 5a6c1da8da15..ba9057c9dd48 100644 --- a/receiver/mongodbatlasreceiver/receiver.go +++ b/receiver/mongodbatlasreceiver/receiver.go @@ -21,7 +21,7 @@ import ( "github.com/pkg/errors" "go.mongodb.org/atlas/mongodbatlas" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/receiver/scraperhelper" "go.uber.org/zap" @@ -52,10 +52,10 @@ func newMongoDBAtlasScraper(log *zap.Logger, cfg *Config) (scraperhelper.Scraper return scraperhelper.NewScraper(typeStr, recv.scrape, scraperhelper.WithShutdown(recv.shutdown)) } -func (s *receiver) scrape(ctx context.Context) (pdata.Metrics, error) { +func (s *receiver) scrape(ctx context.Context) (pmetric.Metrics, error) { now := time.Now() if err := s.poll(ctx, s.timeConstraints(now)); err != nil { - return pdata.Metrics{}, err + return pmetric.Metrics{}, err } s.lastRun = now return s.mb.Emit(), nil diff --git a/receiver/mongodbreceiver/go.mod b/receiver/mongodbreceiver/go.mod index 4a1cb0c59ee0..dacdc7734b57 100644 --- a/receiver/mongodbreceiver/go.mod +++ b/receiver/mongodbreceiver/go.mod @@ -4,8 +4,7 @@ go 1.17 require ( github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d go.uber.org/multierr v1.8.0 ) @@ -13,6 +12,7 @@ require ( github.com/hashicorp/go-version v1.4.0 github.com/open-telemetry/opentelemetry-collector-contrib/internal/scrapertest v0.48.0 go.mongodb.org/mongo-driver v1.9.0 + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/zap v1.21.0 ) @@ -21,11 +21,10 @@ require ( github.com/fsnotify/fsnotify v1.5.1 // indirect github.com/go-stack/stack v1.8.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/protobuf v1.5.2 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/google/go-cmp v0.5.7 // indirect github.com/klauspost/compress v1.15.1 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/text v0.2.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect @@ -34,7 +33,6 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rogpeppe/go-internal v1.6.1 // indirect - github.com/spf13/cast v1.4.1 // indirect github.com/stretchr/objx v0.1.1 // indirect github.com/xdg-go/pbkdf2 v1.0.0 // indirect github.com/xdg-go/scram v1.1.1 // indirect @@ -46,15 +44,12 @@ require ( go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 // indirect - golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d // indirect golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect golang.org/x/text v0.3.7 // indirect - google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect - google.golang.org/grpc v1.45.0 // indirect - google.golang.org/protobuf v1.28.0 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) replace 
github.com/open-telemetry/opentelemetry-collector-contrib/internal/scrapertest => ../../internal/scrapertest + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/receiver/mongodbreceiver/go.sum b/receiver/mongodbreceiver/go.sum index 66c4d65069a0..c5a45854994c 100644 --- a/receiver/mongodbreceiver/go.sum +++ b/receiver/mongodbreceiver/go.sum @@ -1,8 +1,5 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= @@ -19,16 +16,8 @@ github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZx github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -36,16 +25,12 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod 
h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= @@ -64,18 +49,13 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -87,12 +67,10 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.2/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -126,8 +104,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= @@ -168,14 +146,10 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rhnvrm/simples3 v0.6.1/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod 
h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -183,7 +157,6 @@ github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= @@ -207,18 +180,17 @@ go.mongodb.org/mongo-driver v1.9.0 h1:f3aLGJvQmBl8d9S40IL+jEyBC6hfLPbJjv9t5hEM9c go.mongodb.org/mongo-driver v1.9.0/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -245,20 +217,15 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -276,15 +243,13 @@ golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod 
h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -314,22 +279,14 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -339,11 +296,7 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 
v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -351,8 +304,6 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/receiver/mongodbreceiver/internal/metadata/emitters.go b/receiver/mongodbreceiver/internal/metadata/emitters.go index a0ef371af7ad..8912ed0bcb4f 100644 --- a/receiver/mongodbreceiver/internal/metadata/emitters.go +++ b/receiver/mongodbreceiver/internal/metadata/emitters.go @@ -14,9 +14,11 @@ package metadata // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mongodbreceiver/internal/metadata" -import "go.opentelemetry.io/collector/model/pdata" +import ( + "go.opentelemetry.io/collector/pdata/pmetric" +) -func (mb *MetricsBuilder) EmitDatabase(metrics pdata.MetricSlice) { +func (mb *MetricsBuilder) EmitDatabase(metrics pmetric.MetricSlice) { // mb.metricMongodbCacheOperations.emit(metrics) mb.metricMongodbCollectionCount.emit(metrics) mb.metricMongodbConnectionCount.emit(metrics) @@ -30,7 +32,7 @@ func (mb *MetricsBuilder) EmitDatabase(metrics pdata.MetricSlice) { mb.metricMongodbStorageSize.emit(metrics) } -func (mb *MetricsBuilder) EmitAdmin(metrics pdata.MetricSlice) { +func (mb *MetricsBuilder) EmitAdmin(metrics pmetric.MetricSlice) { mb.metricMongodbGlobalLockTime.emit(metrics) mb.metricMongodbOperationCount.emit(metrics) mb.metricMongodbCacheOperations.emit(metrics) diff --git a/receiver/mongodbreceiver/internal/metadata/generated_metrics_v2.go b/receiver/mongodbreceiver/internal/metadata/generated_metrics_v2.go index 083ee14bd0b4..e411c73232a5 100644 --- a/receiver/mongodbreceiver/internal/metadata/generated_metrics_v2.go +++ b/receiver/mongodbreceiver/internal/metadata/generated_metrics_v2.go @@ -5,7 +5,8 @@ package metadata import ( "time" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" ) // MetricSettings provides common settings for a particular metric. @@ -71,7 +72,7 @@ func DefaultMetricsSettings() MetricsSettings { } type metricMongodbCacheOperations struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -81,13 +82,13 @@ func (m *metricMongodbCacheOperations) init() { m.data.SetName("mongodb.cache.operations") m.data.SetDescription("The number of cache operations of the instance.") m.data.SetUnit("{operations}") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbCacheOperations) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, typeAttributeValue string) { +func (m *metricMongodbCacheOperations) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, typeAttributeValue string) { if !m.settings.Enabled { return } @@ -95,7 +96,7 @@ func (m *metricMongodbCacheOperations) recordDataPoint(start pdata.Timestamp, ts dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.Type, pdata.NewValueString(typeAttributeValue)) + dp.Attributes().Insert(A.Type, pcommon.NewValueString(typeAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -106,7 +107,7 @@ func (m *metricMongodbCacheOperations) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbCacheOperations) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbCacheOperations) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -117,14 +118,14 @@ func (m *metricMongodbCacheOperations) emit(metrics pdata.MetricSlice) { func newMetricMongodbCacheOperations(settings MetricSettings) metricMongodbCacheOperations { m := metricMongodbCacheOperations{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbCollectionCount struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -134,13 +135,13 @@ func (m *metricMongodbCollectionCount) init() { m.data.SetName("mongodb.collection.count") m.data.SetDescription("The number of collections.") m.data.SetUnit("{collections}") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbCollectionCount) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, databaseAttributeValue string) { +func (m *metricMongodbCollectionCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, databaseAttributeValue string) { if !m.settings.Enabled { return } @@ -148,7 +149,7 @@ func (m *metricMongodbCollectionCount) recordDataPoint(start pdata.Timestamp, ts dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.Database, pdata.NewValueString(databaseAttributeValue)) + dp.Attributes().Insert(A.Database, pcommon.NewValueString(databaseAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -159,7 +160,7 @@ func (m *metricMongodbCollectionCount) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbCollectionCount) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbCollectionCount) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -170,14 +171,14 @@ func (m *metricMongodbCollectionCount) emit(metrics pdata.MetricSlice) { func newMetricMongodbCollectionCount(settings MetricSettings) metricMongodbCollectionCount { m := metricMongodbCollectionCount{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbConnectionCount struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -187,13 +188,13 @@ func (m *metricMongodbConnectionCount) init() { m.data.SetName("mongodb.connection.count") m.data.SetDescription("The number of connections.") m.data.SetUnit("{connections}") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbConnectionCount) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, databaseAttributeValue string, connectionTypeAttributeValue string) { +func (m *metricMongodbConnectionCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, databaseAttributeValue string, connectionTypeAttributeValue string) { if !m.settings.Enabled { return } @@ -201,8 +202,8 @@ func (m *metricMongodbConnectionCount) recordDataPoint(start pdata.Timestamp, ts dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.Database, pdata.NewValueString(databaseAttributeValue)) - dp.Attributes().Insert(A.ConnectionType, pdata.NewValueString(connectionTypeAttributeValue)) + dp.Attributes().Insert(A.Database, pcommon.NewValueString(databaseAttributeValue)) + dp.Attributes().Insert(A.ConnectionType, pcommon.NewValueString(connectionTypeAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -213,7 +214,7 @@ func (m *metricMongodbConnectionCount) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbConnectionCount) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbConnectionCount) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -224,14 +225,14 @@ func (m *metricMongodbConnectionCount) emit(metrics pdata.MetricSlice) { func newMetricMongodbConnectionCount(settings MetricSettings) metricMongodbConnectionCount { m := metricMongodbConnectionCount{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbDataSize struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -241,13 +242,13 @@ func (m *metricMongodbDataSize) init() { m.data.SetName("mongodb.data.size") m.data.SetDescription("The size of the collection. 
Data compression does not affect this value.") m.data.SetUnit("By") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbDataSize) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, databaseAttributeValue string) { +func (m *metricMongodbDataSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, databaseAttributeValue string) { if !m.settings.Enabled { return } @@ -255,7 +256,7 @@ func (m *metricMongodbDataSize) recordDataPoint(start pdata.Timestamp, ts pdata. dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.Database, pdata.NewValueString(databaseAttributeValue)) + dp.Attributes().Insert(A.Database, pcommon.NewValueString(databaseAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -266,7 +267,7 @@ func (m *metricMongodbDataSize) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbDataSize) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbDataSize) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -277,14 +278,14 @@ func (m *metricMongodbDataSize) emit(metrics pdata.MetricSlice) { func newMetricMongodbDataSize(settings MetricSettings) metricMongodbDataSize { m := metricMongodbDataSize{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbExtentCount struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -294,13 +295,13 @@ func (m *metricMongodbExtentCount) init() { m.data.SetName("mongodb.extent.count") m.data.SetDescription("The number of extents.") m.data.SetUnit("{extents}") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbExtentCount) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, databaseAttributeValue string) { +func (m *metricMongodbExtentCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, databaseAttributeValue string) { if !m.settings.Enabled { return } @@ -308,7 +309,7 @@ func (m *metricMongodbExtentCount) recordDataPoint(start pdata.Timestamp, ts pda dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.Database, pdata.NewValueString(databaseAttributeValue)) + dp.Attributes().Insert(A.Database, pcommon.NewValueString(databaseAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. 
@@ -319,7 +320,7 @@ func (m *metricMongodbExtentCount) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbExtentCount) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbExtentCount) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -330,14 +331,14 @@ func (m *metricMongodbExtentCount) emit(metrics pdata.MetricSlice) { func newMetricMongodbExtentCount(settings MetricSettings) metricMongodbExtentCount { m := metricMongodbExtentCount{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbGlobalLockTime struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -347,12 +348,12 @@ func (m *metricMongodbGlobalLockTime) init() { m.data.SetName("mongodb.global_lock.time") m.data.SetDescription("The time the global lock has been held.") m.data.SetUnit("ms") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) } -func (m *metricMongodbGlobalLockTime) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricMongodbGlobalLockTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -370,7 +371,7 @@ func (m *metricMongodbGlobalLockTime) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbGlobalLockTime) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbGlobalLockTime) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -381,14 +382,14 @@ func (m *metricMongodbGlobalLockTime) emit(metrics pdata.MetricSlice) { func newMetricMongodbGlobalLockTime(settings MetricSettings) metricMongodbGlobalLockTime { m := metricMongodbGlobalLockTime{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbIndexCount struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -398,13 +399,13 @@ func (m *metricMongodbIndexCount) init() { m.data.SetName("mongodb.index.count") m.data.SetDescription("The number of indexes.") m.data.SetUnit("{indexes}") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbIndexCount) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, databaseAttributeValue string) { +func (m *metricMongodbIndexCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, databaseAttributeValue string) { if !m.settings.Enabled { return } @@ -412,7 +413,7 @@ func (m *metricMongodbIndexCount) recordDataPoint(start pdata.Timestamp, ts pdat dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.Database, pdata.NewValueString(databaseAttributeValue)) + dp.Attributes().Insert(A.Database, pcommon.NewValueString(databaseAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -423,7 +424,7 @@ func (m *metricMongodbIndexCount) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbIndexCount) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbIndexCount) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -434,14 +435,14 @@ func (m *metricMongodbIndexCount) emit(metrics pdata.MetricSlice) { func newMetricMongodbIndexCount(settings MetricSettings) metricMongodbIndexCount { m := metricMongodbIndexCount{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbIndexSize struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -451,13 +452,13 @@ func (m *metricMongodbIndexSize) init() { m.data.SetName("mongodb.index.size") m.data.SetDescription("Sum of the space allocated to all indexes in the database, including free index space.") m.data.SetUnit("By") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbIndexSize) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, databaseAttributeValue string) { +func (m *metricMongodbIndexSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, databaseAttributeValue string) { if !m.settings.Enabled { return } @@ -465,7 +466,7 @@ func (m *metricMongodbIndexSize) recordDataPoint(start pdata.Timestamp, ts pdata dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.Database, pdata.NewValueString(databaseAttributeValue)) + dp.Attributes().Insert(A.Database, pcommon.NewValueString(databaseAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -476,7 +477,7 @@ func (m *metricMongodbIndexSize) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbIndexSize) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbIndexSize) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -487,14 +488,14 @@ func (m *metricMongodbIndexSize) emit(metrics pdata.MetricSlice) { func newMetricMongodbIndexSize(settings MetricSettings) metricMongodbIndexSize { m := metricMongodbIndexSize{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbMemoryUsage struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -504,13 +505,13 @@ func (m *metricMongodbMemoryUsage) init() { m.data.SetName("mongodb.memory.usage") m.data.SetDescription("The amount of memory used.") m.data.SetUnit("By") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbMemoryUsage) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, databaseAttributeValue string, memoryTypeAttributeValue string) { +func (m *metricMongodbMemoryUsage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, databaseAttributeValue string, memoryTypeAttributeValue string) { if !m.settings.Enabled { return } @@ -518,8 +519,8 @@ func (m *metricMongodbMemoryUsage) recordDataPoint(start pdata.Timestamp, ts pda dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.Database, pdata.NewValueString(databaseAttributeValue)) - dp.Attributes().Insert(A.MemoryType, pdata.NewValueString(memoryTypeAttributeValue)) + dp.Attributes().Insert(A.Database, pcommon.NewValueString(databaseAttributeValue)) + dp.Attributes().Insert(A.MemoryType, pcommon.NewValueString(memoryTypeAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -530,7 +531,7 @@ func (m *metricMongodbMemoryUsage) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbMemoryUsage) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbMemoryUsage) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -541,14 +542,14 @@ func (m *metricMongodbMemoryUsage) emit(metrics pdata.MetricSlice) { func newMetricMongodbMemoryUsage(settings MetricSettings) metricMongodbMemoryUsage { m := metricMongodbMemoryUsage{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbObjectCount struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -558,13 +559,13 @@ func (m *metricMongodbObjectCount) init() { m.data.SetName("mongodb.object.count") m.data.SetDescription("The number of objects.") m.data.SetUnit("{objects}") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbObjectCount) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, databaseAttributeValue string) { +func (m *metricMongodbObjectCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, databaseAttributeValue string) { if !m.settings.Enabled { return } @@ -572,7 +573,7 @@ func (m *metricMongodbObjectCount) recordDataPoint(start pdata.Timestamp, ts pda dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.Database, pdata.NewValueString(databaseAttributeValue)) + dp.Attributes().Insert(A.Database, pcommon.NewValueString(databaseAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -583,7 +584,7 @@ func (m *metricMongodbObjectCount) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbObjectCount) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbObjectCount) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -594,14 +595,14 @@ func (m *metricMongodbObjectCount) emit(metrics pdata.MetricSlice) { func newMetricMongodbObjectCount(settings MetricSettings) metricMongodbObjectCount { m := metricMongodbObjectCount{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbOperationCount struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -611,13 +612,13 @@ func (m *metricMongodbOperationCount) init() { m.data.SetName("mongodb.operation.count") m.data.SetDescription("The number of operations executed.") m.data.SetUnit("{operations}") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbOperationCount) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, operationAttributeValue string) { +func (m *metricMongodbOperationCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, operationAttributeValue string) { if !m.settings.Enabled { return } @@ -625,7 +626,7 @@ func (m *metricMongodbOperationCount) recordDataPoint(start pdata.Timestamp, ts dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.Operation, pdata.NewValueString(operationAttributeValue)) + dp.Attributes().Insert(A.Operation, pcommon.NewValueString(operationAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -636,7 +637,7 @@ func (m *metricMongodbOperationCount) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbOperationCount) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbOperationCount) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -647,14 +648,14 @@ func (m *metricMongodbOperationCount) emit(metrics pdata.MetricSlice) { func newMetricMongodbOperationCount(settings MetricSettings) metricMongodbOperationCount { m := metricMongodbOperationCount{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMongodbStorageSize struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -664,13 +665,13 @@ func (m *metricMongodbStorageSize) init() { m.data.SetName("mongodb.storage.size") m.data.SetDescription("The total amount of storage allocated to this collection.") m.data.SetUnit("By") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMongodbStorageSize) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, databaseAttributeValue string) { +func (m *metricMongodbStorageSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, databaseAttributeValue string) { if !m.settings.Enabled { return } @@ -678,7 +679,7 @@ func (m *metricMongodbStorageSize) recordDataPoint(start pdata.Timestamp, ts pda dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.Database, pdata.NewValueString(databaseAttributeValue)) + dp.Attributes().Insert(A.Database, pcommon.NewValueString(databaseAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -689,7 +690,7 @@ func (m *metricMongodbStorageSize) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMongodbStorageSize) emit(metrics pdata.MetricSlice) { +func (m *metricMongodbStorageSize) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -700,7 +701,7 @@ func (m *metricMongodbStorageSize) emit(metrics pdata.MetricSlice) { func newMetricMongodbStorageSize(settings MetricSettings) metricMongodbStorageSize { m := metricMongodbStorageSize{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m @@ -709,10 +710,10 @@ func newMetricMongodbStorageSize(settings MetricSettings) metricMongodbStorageSi // MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations // required to produce metric representation defined in metadata and user settings. type MetricsBuilder struct { - startTime pdata.Timestamp // start time that will be applied to all recorded data points. - metricsCapacity int // maximum observed number of metrics per resource. - resourceCapacity int // maximum observed number of resource attributes. - metricsBuffer pdata.Metrics // accumulates metrics data before emitting. + startTime pcommon.Timestamp // start time that will be applied to all recorded data points. + metricsCapacity int // maximum observed number of metrics per resource. + resourceCapacity int // maximum observed number of resource attributes. + metricsBuffer pmetric.Metrics // accumulates metrics data before emitting. metricMongodbCacheOperations metricMongodbCacheOperations metricMongodbCollectionCount metricMongodbCollectionCount metricMongodbConnectionCount metricMongodbConnectionCount @@ -731,7 +732,7 @@ type MetricsBuilder struct { type metricBuilderOption func(*MetricsBuilder) // WithStartTime sets startTime on the metrics builder. 
-func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { +func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption { return func(mb *MetricsBuilder) { mb.startTime = startTime } @@ -739,8 +740,8 @@ func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ - startTime: pdata.NewTimestampFromTime(time.Now()), - metricsBuffer: pdata.NewMetrics(), + startTime: pcommon.NewTimestampFromTime(time.Now()), + metricsBuffer: pmetric.NewMetrics(), metricMongodbCacheOperations: newMetricMongodbCacheOperations(settings.MongodbCacheOperations), metricMongodbCollectionCount: newMetricMongodbCollectionCount(settings.MongodbCollectionCount), metricMongodbConnectionCount: newMetricMongodbConnectionCount(settings.MongodbConnectionCount), @@ -761,7 +762,7 @@ func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) } // updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity. -func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { +func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) { if mb.metricsCapacity < rm.ScopeMetrics().At(0).Metrics().Len() { mb.metricsCapacity = rm.ScopeMetrics().At(0).Metrics().Len() } @@ -771,14 +772,14 @@ func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { } // ResourceOption applies changes to provided resource. -type ResourceOption func(pdata.Resource) +type ResourceOption func(pcommon.Resource) // EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for // recording another set of data points as part of another resource. This function can be helpful when one scraper // needs to emit metrics from several resources. Otherwise calling this function is not required, // just `Emit` function can be called instead. Resource attributes should be provided as ResourceOption arguments. func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { - rm := pdata.NewResourceMetrics() + rm := pmetric.NewResourceMetrics() rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity) for _, op := range ro { op(rm.Resource()) @@ -807,77 +808,77 @@ func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { // Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for // recording another set of metrics. This function will be responsible for applying all the transformations required to // produce metric representation defined in metadata and user settings, e.g. delta or cumulative. -func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pdata.Metrics { +func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pmetric.Metrics { mb.EmitForResource(ro...) - metrics := pdata.NewMetrics() + metrics := pmetric.NewMetrics() mb.metricsBuffer.MoveTo(metrics) return metrics } // RecordMongodbCacheOperationsDataPoint adds a data point to mongodb.cache.operations metric. -func (mb *MetricsBuilder) RecordMongodbCacheOperationsDataPoint(ts pdata.Timestamp, val int64, typeAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbCacheOperationsDataPoint(ts pcommon.Timestamp, val int64, typeAttributeValue string) { mb.metricMongodbCacheOperations.recordDataPoint(mb.startTime, ts, val, typeAttributeValue) } // RecordMongodbCollectionCountDataPoint adds a data point to mongodb.collection.count metric. 
-func (mb *MetricsBuilder) RecordMongodbCollectionCountDataPoint(ts pdata.Timestamp, val int64, databaseAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbCollectionCountDataPoint(ts pcommon.Timestamp, val int64, databaseAttributeValue string) { mb.metricMongodbCollectionCount.recordDataPoint(mb.startTime, ts, val, databaseAttributeValue) } // RecordMongodbConnectionCountDataPoint adds a data point to mongodb.connection.count metric. -func (mb *MetricsBuilder) RecordMongodbConnectionCountDataPoint(ts pdata.Timestamp, val int64, databaseAttributeValue string, connectionTypeAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbConnectionCountDataPoint(ts pcommon.Timestamp, val int64, databaseAttributeValue string, connectionTypeAttributeValue string) { mb.metricMongodbConnectionCount.recordDataPoint(mb.startTime, ts, val, databaseAttributeValue, connectionTypeAttributeValue) } // RecordMongodbDataSizeDataPoint adds a data point to mongodb.data.size metric. -func (mb *MetricsBuilder) RecordMongodbDataSizeDataPoint(ts pdata.Timestamp, val int64, databaseAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbDataSizeDataPoint(ts pcommon.Timestamp, val int64, databaseAttributeValue string) { mb.metricMongodbDataSize.recordDataPoint(mb.startTime, ts, val, databaseAttributeValue) } // RecordMongodbExtentCountDataPoint adds a data point to mongodb.extent.count metric. -func (mb *MetricsBuilder) RecordMongodbExtentCountDataPoint(ts pdata.Timestamp, val int64, databaseAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbExtentCountDataPoint(ts pcommon.Timestamp, val int64, databaseAttributeValue string) { mb.metricMongodbExtentCount.recordDataPoint(mb.startTime, ts, val, databaseAttributeValue) } // RecordMongodbGlobalLockTimeDataPoint adds a data point to mongodb.global_lock.time metric. -func (mb *MetricsBuilder) RecordMongodbGlobalLockTimeDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordMongodbGlobalLockTimeDataPoint(ts pcommon.Timestamp, val int64) { mb.metricMongodbGlobalLockTime.recordDataPoint(mb.startTime, ts, val) } // RecordMongodbIndexCountDataPoint adds a data point to mongodb.index.count metric. -func (mb *MetricsBuilder) RecordMongodbIndexCountDataPoint(ts pdata.Timestamp, val int64, databaseAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbIndexCountDataPoint(ts pcommon.Timestamp, val int64, databaseAttributeValue string) { mb.metricMongodbIndexCount.recordDataPoint(mb.startTime, ts, val, databaseAttributeValue) } // RecordMongodbIndexSizeDataPoint adds a data point to mongodb.index.size metric. -func (mb *MetricsBuilder) RecordMongodbIndexSizeDataPoint(ts pdata.Timestamp, val int64, databaseAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbIndexSizeDataPoint(ts pcommon.Timestamp, val int64, databaseAttributeValue string) { mb.metricMongodbIndexSize.recordDataPoint(mb.startTime, ts, val, databaseAttributeValue) } // RecordMongodbMemoryUsageDataPoint adds a data point to mongodb.memory.usage metric. 
-func (mb *MetricsBuilder) RecordMongodbMemoryUsageDataPoint(ts pdata.Timestamp, val int64, databaseAttributeValue string, memoryTypeAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbMemoryUsageDataPoint(ts pcommon.Timestamp, val int64, databaseAttributeValue string, memoryTypeAttributeValue string) { mb.metricMongodbMemoryUsage.recordDataPoint(mb.startTime, ts, val, databaseAttributeValue, memoryTypeAttributeValue) } // RecordMongodbObjectCountDataPoint adds a data point to mongodb.object.count metric. -func (mb *MetricsBuilder) RecordMongodbObjectCountDataPoint(ts pdata.Timestamp, val int64, databaseAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbObjectCountDataPoint(ts pcommon.Timestamp, val int64, databaseAttributeValue string) { mb.metricMongodbObjectCount.recordDataPoint(mb.startTime, ts, val, databaseAttributeValue) } // RecordMongodbOperationCountDataPoint adds a data point to mongodb.operation.count metric. -func (mb *MetricsBuilder) RecordMongodbOperationCountDataPoint(ts pdata.Timestamp, val int64, operationAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbOperationCountDataPoint(ts pcommon.Timestamp, val int64, operationAttributeValue string) { mb.metricMongodbOperationCount.recordDataPoint(mb.startTime, ts, val, operationAttributeValue) } // RecordMongodbStorageSizeDataPoint adds a data point to mongodb.storage.size metric. -func (mb *MetricsBuilder) RecordMongodbStorageSizeDataPoint(ts pdata.Timestamp, val int64, databaseAttributeValue string) { +func (mb *MetricsBuilder) RecordMongodbStorageSizeDataPoint(ts pcommon.Timestamp, val int64, databaseAttributeValue string) { mb.metricMongodbStorageSize.recordDataPoint(mb.startTime, ts, val, databaseAttributeValue) } // Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted, // and metrics builder should update its startTime and reset it's internal state accordingly. 
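A hedged sketch (not part of the patch) of how a scraper is expected to drive this builder after the split: timestamps are created with pcommon, and Emit now returns pmetric.Metrics. It assumes the MetricsBuilder, MetricsSettings, WithStartTime, and RecordMongodbCollectionCountDataPoint defined in this file; the recorded value and database name are made up.

package metadata

import (
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"
)

// exampleScrapePass records one data point and emits the accumulated batch.
func exampleScrapePass(settings MetricsSettings, collectionCount int64, dbName string) pmetric.Metrics {
	mb := NewMetricsBuilder(settings, WithStartTime(pcommon.NewTimestampFromTime(time.Now())))
	now := pcommon.NewTimestampFromTime(time.Now())
	mb.RecordMongodbCollectionCountDataPoint(now, collectionCount, dbName)
	// Emit moves everything recorded so far into a fresh pmetric.Metrics value
	// and leaves the builder ready for the next scrape.
	return mb.Emit()
}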
func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { - mb.startTime = pdata.NewTimestampFromTime(time.Now()) + mb.startTime = pcommon.NewTimestampFromTime(time.Now()) for _, op := range options { op(mb) } diff --git a/receiver/mongodbreceiver/metrics.go b/receiver/mongodbreceiver/metrics.go index 6d1b814df30f..519bac193666 100644 --- a/receiver/mongodbreceiver/metrics.go +++ b/receiver/mongodbreceiver/metrics.go @@ -21,7 +21,7 @@ import ( "github.com/hashicorp/go-version" "go.mongodb.org/mongo-driver/bson" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/receiver/scrapererror" "go.uber.org/zap" @@ -29,7 +29,7 @@ import ( ) // DBStats -func (s *mongodbScraper) recordCollections(now pdata.Timestamp, doc bson.M, dbName string, errors scrapererror.ScrapeErrors) { +func (s *mongodbScraper) recordCollections(now pcommon.Timestamp, doc bson.M, dbName string, errors scrapererror.ScrapeErrors) { collectionsPath := []string{"collections"} collections, err := dig(doc, collectionsPath) if err != nil { @@ -44,7 +44,7 @@ func (s *mongodbScraper) recordCollections(now pdata.Timestamp, doc bson.M, dbNa s.mb.RecordMongodbCollectionCountDataPoint(now, collectionsVal, dbName) } -func (s *mongodbScraper) recordDataSize(now pdata.Timestamp, doc bson.M, dbName string, errors scrapererror.ScrapeErrors) { +func (s *mongodbScraper) recordDataSize(now pcommon.Timestamp, doc bson.M, dbName string, errors scrapererror.ScrapeErrors) { dataSizePath := []string{"dataSize"} dataSize, err := dig(doc, dataSizePath) if err != nil { @@ -59,7 +59,7 @@ func (s *mongodbScraper) recordDataSize(now pdata.Timestamp, doc bson.M, dbName s.mb.RecordMongodbDataSizeDataPoint(now, dataSizeVal, dbName) } -func (s *mongodbScraper) recordStorageSize(now pdata.Timestamp, doc bson.M, dbName string, errors scrapererror.ScrapeErrors) { +func (s *mongodbScraper) recordStorageSize(now pcommon.Timestamp, doc bson.M, dbName string, errors scrapererror.ScrapeErrors) { storageSizePath := []string{"storageSize"} storageSize, err := dig(doc, storageSizePath) if err != nil { @@ -74,7 +74,7 @@ func (s *mongodbScraper) recordStorageSize(now pdata.Timestamp, doc bson.M, dbNa s.mb.RecordMongodbStorageSizeDataPoint(now, storageSizeValue, dbName) } -func (s *mongodbScraper) recordObjectCount(now pdata.Timestamp, doc bson.M, dbName string, errors scrapererror.ScrapeErrors) { +func (s *mongodbScraper) recordObjectCount(now pcommon.Timestamp, doc bson.M, dbName string, errors scrapererror.ScrapeErrors) { objectsPath := []string{"objects"} objects, err := dig(doc, objectsPath) if err != nil { @@ -89,7 +89,7 @@ func (s *mongodbScraper) recordObjectCount(now pdata.Timestamp, doc bson.M, dbNa s.mb.RecordMongodbObjectCountDataPoint(now, objectsVal, dbName) } -func (s *mongodbScraper) recordIndexCount(now pdata.Timestamp, doc bson.M, dbName string, errors scrapererror.ScrapeErrors) { +func (s *mongodbScraper) recordIndexCount(now pcommon.Timestamp, doc bson.M, dbName string, errors scrapererror.ScrapeErrors) { indexesPath := []string{"indexes"} indexes, err := dig(doc, indexesPath) if err != nil { @@ -104,7 +104,7 @@ func (s *mongodbScraper) recordIndexCount(now pdata.Timestamp, doc bson.M, dbNam s.mb.RecordMongodbIndexCountDataPoint(now, indexesVal, dbName) } -func (s *mongodbScraper) recordIndexSize(now pdata.Timestamp, doc bson.M, dbName string, errors scrapererror.ScrapeErrors) { +func (s *mongodbScraper) recordIndexSize(now pcommon.Timestamp, doc bson.M, dbName 
string, errors scrapererror.ScrapeErrors) { indexSizePath := []string{"indexSize"} indexSize, err := dig(doc, indexSizePath) if err != nil { @@ -119,7 +119,7 @@ func (s *mongodbScraper) recordIndexSize(now pdata.Timestamp, doc bson.M, dbName s.mb.RecordMongodbIndexSizeDataPoint(now, indexSizeVal, dbName) } -func (s *mongodbScraper) recordExtentCount(now pdata.Timestamp, doc bson.M, dbName string, errors scrapererror.ScrapeErrors) { +func (s *mongodbScraper) recordExtentCount(now pcommon.Timestamp, doc bson.M, dbName string, errors scrapererror.ScrapeErrors) { extentsPath := []string{"numExtents"} extents, err := dig(doc, extentsPath) if err != nil { @@ -135,7 +135,7 @@ func (s *mongodbScraper) recordExtentCount(now pdata.Timestamp, doc bson.M, dbNa } // ServerStatus -func (s *mongodbScraper) recordConnections(now pdata.Timestamp, doc bson.M, dbName string, errors scrapererror.ScrapeErrors) { +func (s *mongodbScraper) recordConnections(now pcommon.Timestamp, doc bson.M, dbName string, errors scrapererror.ScrapeErrors) { types := []string{ metadata.AttributeConnectionType.Active, metadata.AttributeConnectionType.Available, @@ -158,7 +158,7 @@ func (s *mongodbScraper) recordConnections(now pdata.Timestamp, doc bson.M, dbNa } } -func (s *mongodbScraper) recordMemoryUsage(now pdata.Timestamp, doc bson.M, dbName string, errors scrapererror.ScrapeErrors) { +func (s *mongodbScraper) recordMemoryUsage(now pcommon.Timestamp, doc bson.M, dbName string, errors scrapererror.ScrapeErrors) { types := []string{ metadata.AttributeMemoryType.Resident, metadata.AttributeMemoryType.Virtual, @@ -183,7 +183,7 @@ func (s *mongodbScraper) recordMemoryUsage(now pdata.Timestamp, doc bson.M, dbNa } // Admin Stats -func (s *mongodbScraper) recordOperations(now pdata.Timestamp, doc bson.M, errors scrapererror.ScrapeErrors) { +func (s *mongodbScraper) recordOperations(now pcommon.Timestamp, doc bson.M, errors scrapererror.ScrapeErrors) { // Collect Operations for _, operation := range []string{ metadata.AttributeOperation.Insert, @@ -209,7 +209,7 @@ func (s *mongodbScraper) recordOperations(now pdata.Timestamp, doc bson.M, error } } -func (s *mongodbScraper) recordCacheOperations(now pdata.Timestamp, doc bson.M, errors scrapererror.ScrapeErrors) { +func (s *mongodbScraper) recordCacheOperations(now pcommon.Timestamp, doc bson.M, errors scrapererror.ScrapeErrors) { // Collect Cache Hits & Misses canCalculateCacheHits := true @@ -246,7 +246,7 @@ func (s *mongodbScraper) recordCacheOperations(now pdata.Timestamp, doc bson.M, } } -func (s *mongodbScraper) recordGlobalLockTime(now pdata.Timestamp, doc bson.M, errors scrapererror.ScrapeErrors) { +func (s *mongodbScraper) recordGlobalLockTime(now pcommon.Timestamp, doc bson.M, errors scrapererror.ScrapeErrors) { var heldTimeUs int64 // Mongo version greater than or equal to 4.0 have it in the serverStats at "globalLock", "totalTime" diff --git a/receiver/mongodbreceiver/scraper.go b/receiver/mongodbreceiver/scraper.go index 8cebef808b3b..7196f1610afc 100644 --- a/receiver/mongodbreceiver/scraper.go +++ b/receiver/mongodbreceiver/scraper.go @@ -23,7 +23,8 @@ import ( "github.com/hashicorp/go-version" "go.mongodb.org/mongo-driver/bson" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/receiver/scrapererror" "go.uber.org/zap" @@ -73,8 +74,8 @@ func (s *mongodbScraper) shutdown(ctx context.Context) error { 
return nil } -func (s *mongodbScraper) scrape(ctx context.Context) (pdata.Metrics, error) { - metrics := pdata.NewMetrics() +func (s *mongodbScraper) scrape(ctx context.Context) (pmetric.Metrics, error) { + metrics := pmetric.NewMetrics() rms := metrics.ResourceMetrics() if s.client == nil { @@ -84,7 +85,7 @@ func (s *mongodbScraper) scrape(ctx context.Context) (pdata.Metrics, error) { if s.mongoVersion == nil { version, err := s.client.GetVersion(ctx) if err != nil { - return pdata.NewMetrics(), fmt.Errorf("unable to determine version of mongo scraping against: %w", err) + return pmetric.NewMetrics(), fmt.Errorf("unable to determine version of mongo scraping against: %w", err) } s.mongoVersion = version } @@ -94,14 +95,14 @@ func (s *mongodbScraper) scrape(ctx context.Context) (pdata.Metrics, error) { return metrics, errors.Combine() } -func (s *mongodbScraper) collectMetrics(ctx context.Context, rms pdata.ResourceMetricsSlice, errors scrapererror.ScrapeErrors) { +func (s *mongodbScraper) collectMetrics(ctx context.Context, rms pmetric.ResourceMetricsSlice, errors scrapererror.ScrapeErrors) { dbNames, err := s.client.ListDatabaseNames(ctx, bson.D{}) if err != nil { s.logger.Error("Failed to fetch database names", zap.Error(err)) return } - now := pdata.NewTimestampFromTime(time.Now()) + now := pcommon.NewTimestampFromTime(time.Now()) s.collectAdminDatabase(ctx, rms, now, errors) for _, dbName := range dbNames { @@ -109,7 +110,7 @@ func (s *mongodbScraper) collectMetrics(ctx context.Context, rms pdata.ResourceM } } -func (s *mongodbScraper) collectDatabase(ctx context.Context, rms pdata.ResourceMetricsSlice, now pdata.Timestamp, databaseName string, errors scrapererror.ScrapeErrors) { +func (s *mongodbScraper) collectDatabase(ctx context.Context, rms pmetric.ResourceMetricsSlice, now pcommon.Timestamp, databaseName string, errors scrapererror.ScrapeErrors) { rm := rms.AppendEmpty() resourceAttrs := rm.Resource().Attributes() resourceAttrs.InsertString(metadata.A.Database, databaseName) @@ -134,7 +135,7 @@ func (s *mongodbScraper) collectDatabase(ctx context.Context, rms pdata.Resource s.mb.EmitDatabase(ilms.Metrics()) } -func (s *mongodbScraper) collectAdminDatabase(ctx context.Context, rms pdata.ResourceMetricsSlice, now pdata.Timestamp, errors scrapererror.ScrapeErrors) { +func (s *mongodbScraper) collectAdminDatabase(ctx context.Context, rms pmetric.ResourceMetricsSlice, now pcommon.Timestamp, errors scrapererror.ScrapeErrors) { rm := rms.AppendEmpty() ilms := rm.ScopeMetrics().AppendEmpty() ilms.Scope().SetName(instrumentationLibraryName) @@ -148,7 +149,7 @@ func (s *mongodbScraper) collectAdminDatabase(ctx context.Context, rms pdata.Res s.mb.EmitAdmin(ilms.Metrics()) } -func (s *mongodbScraper) recordDBStats(now pdata.Timestamp, doc bson.M, dbName string, errors scrapererror.ScrapeErrors) { +func (s *mongodbScraper) recordDBStats(now pcommon.Timestamp, doc bson.M, dbName string, errors scrapererror.ScrapeErrors) { s.recordCollections(now, doc, dbName, errors) s.recordDataSize(now, doc, dbName, errors) s.recordExtentCount(now, doc, dbName, errors) @@ -158,12 +159,12 @@ func (s *mongodbScraper) recordDBStats(now pdata.Timestamp, doc bson.M, dbName s s.recordStorageSize(now, doc, dbName, errors) } -func (s *mongodbScraper) recordNormalServerStats(now pdata.Timestamp, doc bson.M, dbName string, errors scrapererror.ScrapeErrors) { +func (s *mongodbScraper) recordNormalServerStats(now pcommon.Timestamp, doc bson.M, dbName string, errors scrapererror.ScrapeErrors) { s.recordConnections(now, 
doc, dbName, errors) s.recordMemoryUsage(now, doc, dbName, errors) } -func (s *mongodbScraper) recordAdminStats(now pdata.Timestamp, document bson.M, errors scrapererror.ScrapeErrors) { +func (s *mongodbScraper) recordAdminStats(now pcommon.Timestamp, document bson.M, errors scrapererror.ScrapeErrors) { s.recordGlobalLockTime(now, document, errors) s.recordCacheOperations(now, document, errors) s.recordOperations(now, document, errors) diff --git a/receiver/mongodbreceiver/scraper_test.go b/receiver/mongodbreceiver/scraper_test.go index f5854dc0ea85..de91a59df951 100644 --- a/receiver/mongodbreceiver/scraper_test.go +++ b/receiver/mongodbreceiver/scraper_test.go @@ -24,7 +24,8 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "go.mongodb.org/mongo-driver/bson/primitive" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/receiver/scrapererror" "go.uber.org/zap" @@ -115,11 +116,11 @@ func TestGlobalLockTimeOldFormat(t *testing.T) { }, } - now := pdata.NewTimestampFromTime(time.Now()) + now := pcommon.NewTimestampFromTime(time.Now()) scraper.recordGlobalLockTime(now, doc, scrapererror.ScrapeErrors{}) expectedValue := (int64(116749+14340) / 1000) - metrics := pdata.NewMetricSlice() + metrics := pmetric.NewMetricSlice() scraper.mb.EmitAdmin(metrics) collectedValue := metrics.At(0).Sum().DataPoints().At(0).IntVal() require.Equal(t, expectedValue, collectedValue) diff --git a/receiver/mysqlreceiver/go.mod b/receiver/mysqlreceiver/go.mod index f6b6fe6a2dea..e942b515f038 100644 --- a/receiver/mysqlreceiver/go.mod +++ b/receiver/mysqlreceiver/go.mod @@ -6,18 +6,20 @@ require ( github.com/go-sql-driver/mysql v1.6.0 github.com/open-telemetry/opentelemetry-collector-contrib/internal/scrapertest v0.48.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d go.uber.org/zap v1.21.0 ) -require github.com/testcontainers/testcontainers-go v0.13.0 +require ( + github.com/testcontainers/testcontainers-go v0.13.0 + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 +) require ( github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect github.com/Microsoft/go-winio v0.4.17 // indirect github.com/Microsoft/hcsshim v0.8.23 // indirect - github.com/cenkalti/backoff/v4 v4.1.2 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/containerd/cgroups v1.0.1 // indirect github.com/containerd/containerd v1.5.9 // indirect github.com/davecgh/go-spew v1.1.1 // indirect @@ -30,7 +32,7 @@ require ( github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.2 // indirect github.com/google/uuid v1.3.0 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/text v0.2.0 // indirect github.com/magiconair/properties v1.8.6 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect @@ -48,16 +50,14 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rogpeppe/go-internal v1.6.1 // indirect github.com/sirupsen/logrus v1.8.1 // indirect - 
github.com/spf13/cast v1.4.1 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect - golang.org/x/net v0.0.0-20211108170745-6635138e15ea // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect - golang.org/x/text v0.3.7 // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect + golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 // indirect google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect google.golang.org/grpc v1.45.0 // indirect google.golang.org/protobuf v1.28.0 // indirect @@ -66,3 +66,5 @@ require ( ) replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/scrapertest => ../../internal/scrapertest + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/receiver/mysqlreceiver/go.sum b/receiver/mysqlreceiver/go.sum index 154bb01a2db1..4047b0703309 100644 --- a/receiver/mysqlreceiver/go.sum +++ b/receiver/mysqlreceiver/go.sum @@ -102,8 +102,9 @@ github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8n github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -401,8 +402,8 @@ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5m github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= +github.com/gorilla/mux v1.7.2 h1:zoNxOV7WjqXptQOVngLmcSQgXmgk4NMz1HibBchjl/I= github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= @@ -467,8 +468,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod 
h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -659,8 +660,6 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasO github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= @@ -729,15 +728,15 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 
h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= @@ -835,8 +834,9 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20211108170745-6635138e15ea h1:FosBMXtOc8Tp9Hbo4ltl1WJSrTVewZU8MPnTPY2HdH8= golang.org/x/net v0.0.0-20211108170745-6635138e15ea/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -926,8 +926,8 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -939,7 +939,6 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= diff --git a/receiver/mysqlreceiver/internal/metadata/generated_metrics_v2.go 
b/receiver/mysqlreceiver/internal/metadata/generated_metrics_v2.go index 13a33fb84e29..8d88cc164267 100644 --- a/receiver/mysqlreceiver/internal/metadata/generated_metrics_v2.go +++ b/receiver/mysqlreceiver/internal/metadata/generated_metrics_v2.go @@ -5,7 +5,8 @@ package metadata import ( "time" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" ) // MetricSettings provides common settings for a particular metric. @@ -91,7 +92,7 @@ func DefaultMetricsSettings() MetricsSettings { } type metricMysqlBufferPoolDataPages struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -101,13 +102,13 @@ func (m *metricMysqlBufferPoolDataPages) init() { m.data.SetName("mysql.buffer_pool.data_pages") m.data.SetDescription("The number of data pages in the InnoDB buffer pool.") m.data.SetUnit("1") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMysqlBufferPoolDataPages) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, bufferPoolDataAttributeValue string) { +func (m *metricMysqlBufferPoolDataPages) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, bufferPoolDataAttributeValue string) { if !m.settings.Enabled { return } @@ -115,7 +116,7 @@ func (m *metricMysqlBufferPoolDataPages) recordDataPoint(start pdata.Timestamp, dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.BufferPoolData, pdata.NewValueString(bufferPoolDataAttributeValue)) + dp.Attributes().Insert(A.BufferPoolData, pcommon.NewValueString(bufferPoolDataAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -126,7 +127,7 @@ func (m *metricMysqlBufferPoolDataPages) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMysqlBufferPoolDataPages) emit(metrics pdata.MetricSlice) { +func (m *metricMysqlBufferPoolDataPages) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -137,14 +138,14 @@ func (m *metricMysqlBufferPoolDataPages) emit(metrics pdata.MetricSlice) { func newMetricMysqlBufferPoolDataPages(settings MetricSettings) metricMysqlBufferPoolDataPages { m := metricMysqlBufferPoolDataPages{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMysqlBufferPoolLimit struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -154,12 +155,12 @@ func (m *metricMysqlBufferPoolLimit) init() { m.data.SetName("mysql.buffer_pool.limit") m.data.SetDescription("The configured size of the InnoDB buffer pool.") m.data.SetUnit("By") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) } -func (m *metricMysqlBufferPoolLimit) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricMysqlBufferPoolLimit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -177,7 +178,7 @@ func (m *metricMysqlBufferPoolLimit) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMysqlBufferPoolLimit) emit(metrics pdata.MetricSlice) { +func (m *metricMysqlBufferPoolLimit) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -188,14 +189,14 @@ func (m *metricMysqlBufferPoolLimit) emit(metrics pdata.MetricSlice) { func newMetricMysqlBufferPoolLimit(settings MetricSettings) metricMysqlBufferPoolLimit { m := metricMysqlBufferPoolLimit{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMysqlBufferPoolOperations struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -205,13 +206,13 @@ func (m *metricMysqlBufferPoolOperations) init() { m.data.SetName("mysql.buffer_pool.operations") m.data.SetDescription("The number of operations on the InnoDB buffer pool.") m.data.SetUnit("1") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMysqlBufferPoolOperations) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, bufferPoolOperationsAttributeValue string) { +func (m *metricMysqlBufferPoolOperations) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, bufferPoolOperationsAttributeValue string) { if !m.settings.Enabled { return } @@ -219,7 +220,7 @@ func (m *metricMysqlBufferPoolOperations) recordDataPoint(start pdata.Timestamp, dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.BufferPoolOperations, pdata.NewValueString(bufferPoolOperationsAttributeValue)) + dp.Attributes().Insert(A.BufferPoolOperations, pcommon.NewValueString(bufferPoolOperationsAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -230,7 +231,7 @@ func (m *metricMysqlBufferPoolOperations) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
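A minimal sketch (not part of the patch) of the emit lifecycle these generated types share: once at least one data point has been recorded, the buffered pmetric.Metric is moved into the caller's pmetric.MetricSlice, and the generated code then re-runs init() so the buffer is ready for the next cycle. It assumes the buffered metric was initialized as a Sum, as in the init() functions above.

package example

import "go.opentelemetry.io/collector/pdata/pmetric"

// emitInto mirrors the generated emit(): skip empty buffers, otherwise hand the
// buffered metric over to the output slice. MoveTo transfers the data and resets
// the source, matching m.data.MoveTo(metrics.AppendEmpty()) above.
func emitInto(buffered pmetric.Metric, out pmetric.MetricSlice) {
	if buffered.Sum().DataPoints().Len() == 0 {
		return
	}
	buffered.MoveTo(out.AppendEmpty())
}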
-func (m *metricMysqlBufferPoolOperations) emit(metrics pdata.MetricSlice) { +func (m *metricMysqlBufferPoolOperations) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -241,14 +242,14 @@ func (m *metricMysqlBufferPoolOperations) emit(metrics pdata.MetricSlice) { func newMetricMysqlBufferPoolOperations(settings MetricSettings) metricMysqlBufferPoolOperations { m := metricMysqlBufferPoolOperations{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMysqlBufferPoolPageFlushes struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -258,12 +259,12 @@ func (m *metricMysqlBufferPoolPageFlushes) init() { m.data.SetName("mysql.buffer_pool.page_flushes") m.data.SetDescription("The number of requests to flush pages from the InnoDB buffer pool.") m.data.SetUnit("1") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) } -func (m *metricMysqlBufferPoolPageFlushes) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricMysqlBufferPoolPageFlushes) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -281,7 +282,7 @@ func (m *metricMysqlBufferPoolPageFlushes) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMysqlBufferPoolPageFlushes) emit(metrics pdata.MetricSlice) { +func (m *metricMysqlBufferPoolPageFlushes) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -292,14 +293,14 @@ func (m *metricMysqlBufferPoolPageFlushes) emit(metrics pdata.MetricSlice) { func newMetricMysqlBufferPoolPageFlushes(settings MetricSettings) metricMysqlBufferPoolPageFlushes { m := metricMysqlBufferPoolPageFlushes{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMysqlBufferPoolPages struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -309,13 +310,13 @@ func (m *metricMysqlBufferPoolPages) init() { m.data.SetName("mysql.buffer_pool.pages") m.data.SetDescription("The number of pages in the InnoDB buffer pool.") m.data.SetUnit("1") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMysqlBufferPoolPages) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, bufferPoolPagesAttributeValue string) { +func (m *metricMysqlBufferPoolPages) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, bufferPoolPagesAttributeValue string) { if !m.settings.Enabled { return } @@ -323,7 +324,7 @@ func (m *metricMysqlBufferPoolPages) recordDataPoint(start pdata.Timestamp, ts p dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.BufferPoolPages, pdata.NewValueString(bufferPoolPagesAttributeValue)) + dp.Attributes().Insert(A.BufferPoolPages, pcommon.NewValueString(bufferPoolPagesAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -334,7 +335,7 @@ func (m *metricMysqlBufferPoolPages) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMysqlBufferPoolPages) emit(metrics pdata.MetricSlice) { +func (m *metricMysqlBufferPoolPages) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -345,14 +346,14 @@ func (m *metricMysqlBufferPoolPages) emit(metrics pdata.MetricSlice) { func newMetricMysqlBufferPoolPages(settings MetricSettings) metricMysqlBufferPoolPages { m := metricMysqlBufferPoolPages{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMysqlBufferPoolUsage struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -362,13 +363,13 @@ func (m *metricMysqlBufferPoolUsage) init() { m.data.SetName("mysql.buffer_pool.usage") m.data.SetDescription("The number of bytes in the InnoDB buffer pool.") m.data.SetUnit("By") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMysqlBufferPoolUsage) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, bufferPoolDataAttributeValue string) { +func (m *metricMysqlBufferPoolUsage) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, bufferPoolDataAttributeValue string) { if !m.settings.Enabled { return } @@ -376,7 +377,7 @@ func (m *metricMysqlBufferPoolUsage) recordDataPoint(start pdata.Timestamp, ts p dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.BufferPoolData, pdata.NewValueString(bufferPoolDataAttributeValue)) + dp.Attributes().Insert(A.BufferPoolData, pcommon.NewValueString(bufferPoolDataAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -387,7 +388,7 @@ func (m *metricMysqlBufferPoolUsage) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMysqlBufferPoolUsage) emit(metrics pdata.MetricSlice) { +func (m *metricMysqlBufferPoolUsage) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -398,14 +399,14 @@ func (m *metricMysqlBufferPoolUsage) emit(metrics pdata.MetricSlice) { func newMetricMysqlBufferPoolUsage(settings MetricSettings) metricMysqlBufferPoolUsage { m := metricMysqlBufferPoolUsage{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMysqlCommands struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -415,13 +416,13 @@ func (m *metricMysqlCommands) init() { m.data.SetName("mysql.commands") m.data.SetDescription("The number of times each type of command has been executed.") m.data.SetUnit("1") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMysqlCommands) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, commandAttributeValue string) { +func (m *metricMysqlCommands) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, commandAttributeValue string) { if !m.settings.Enabled { return } @@ -429,7 +430,7 @@ func (m *metricMysqlCommands) recordDataPoint(start pdata.Timestamp, ts pdata.Ti dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.Command, pdata.NewValueString(commandAttributeValue)) + dp.Attributes().Insert(A.Command, pcommon.NewValueString(commandAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -440,7 +441,7 @@ func (m *metricMysqlCommands) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMysqlCommands) emit(metrics pdata.MetricSlice) { +func (m *metricMysqlCommands) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -451,14 +452,14 @@ func (m *metricMysqlCommands) emit(metrics pdata.MetricSlice) { func newMetricMysqlCommands(settings MetricSettings) metricMysqlCommands { m := metricMysqlCommands{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMysqlDoubleWrites struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -468,13 +469,13 @@ func (m *metricMysqlDoubleWrites) init() { m.data.SetName("mysql.double_writes") m.data.SetDescription("The number of writes to the InnoDB doublewrite buffer.") m.data.SetUnit("1") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMysqlDoubleWrites) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, doubleWritesAttributeValue string) { +func (m *metricMysqlDoubleWrites) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, doubleWritesAttributeValue string) { if !m.settings.Enabled { return } @@ -482,7 +483,7 @@ func (m *metricMysqlDoubleWrites) recordDataPoint(start pdata.Timestamp, ts pdat dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.DoubleWrites, pdata.NewValueString(doubleWritesAttributeValue)) + dp.Attributes().Insert(A.DoubleWrites, pcommon.NewValueString(doubleWritesAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -493,7 +494,7 @@ func (m *metricMysqlDoubleWrites) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMysqlDoubleWrites) emit(metrics pdata.MetricSlice) { +func (m *metricMysqlDoubleWrites) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -504,14 +505,14 @@ func (m *metricMysqlDoubleWrites) emit(metrics pdata.MetricSlice) { func newMetricMysqlDoubleWrites(settings MetricSettings) metricMysqlDoubleWrites { m := metricMysqlDoubleWrites{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMysqlHandlers struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -521,13 +522,13 @@ func (m *metricMysqlHandlers) init() { m.data.SetName("mysql.handlers") m.data.SetDescription("The number of requests to various MySQL handlers.") m.data.SetUnit("1") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMysqlHandlers) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, handlerAttributeValue string) { +func (m *metricMysqlHandlers) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, handlerAttributeValue string) { if !m.settings.Enabled { return } @@ -535,7 +536,7 @@ func (m *metricMysqlHandlers) recordDataPoint(start pdata.Timestamp, ts pdata.Ti dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.Handler, pdata.NewValueString(handlerAttributeValue)) + dp.Attributes().Insert(A.Handler, pcommon.NewValueString(handlerAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -546,7 +547,7 @@ func (m *metricMysqlHandlers) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMysqlHandlers) emit(metrics pdata.MetricSlice) { +func (m *metricMysqlHandlers) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -557,14 +558,14 @@ func (m *metricMysqlHandlers) emit(metrics pdata.MetricSlice) { func newMetricMysqlHandlers(settings MetricSettings) metricMysqlHandlers { m := metricMysqlHandlers{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMysqlLocks struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -574,13 +575,13 @@ func (m *metricMysqlLocks) init() { m.data.SetName("mysql.locks") m.data.SetDescription("The number of MySQL locks.") m.data.SetUnit("1") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMysqlLocks) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, locksAttributeValue string) { +func (m *metricMysqlLocks) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, locksAttributeValue string) { if !m.settings.Enabled { return } @@ -588,7 +589,7 @@ func (m *metricMysqlLocks) recordDataPoint(start pdata.Timestamp, ts pdata.Times dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.Locks, pdata.NewValueString(locksAttributeValue)) + dp.Attributes().Insert(A.Locks, pcommon.NewValueString(locksAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. 
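For contrast, a small sketch (not part of the patch) of the two attribute paths this rename touches: data-point attributes take a pcommon.Value built with pcommon.NewValueString (formerly pdata.NewValueString), while resource attributes, as in the mongodb collectDatabase change earlier, use the typed InsertString helper. The key names and values here are illustrative only.

package example

import (
	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"
)

// tagExample labels a number data point and a resource with string attributes.
func tagExample(dp pmetric.NumberDataPoint, res pcommon.Resource) {
	dp.Attributes().Insert("handler", pcommon.NewValueString("read")) // per-point attribute
	res.Attributes().InsertString("database", "orders")               // per-resource attribute
}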
@@ -599,7 +600,7 @@ func (m *metricMysqlLocks) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMysqlLocks) emit(metrics pdata.MetricSlice) { +func (m *metricMysqlLocks) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -610,14 +611,14 @@ func (m *metricMysqlLocks) emit(metrics pdata.MetricSlice) { func newMetricMysqlLocks(settings MetricSettings) metricMysqlLocks { m := metricMysqlLocks{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMysqlLogOperations struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -627,13 +628,13 @@ func (m *metricMysqlLogOperations) init() { m.data.SetName("mysql.log_operations") m.data.SetDescription("The number of InndoDB log operations.") m.data.SetUnit("1") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMysqlLogOperations) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, logOperationsAttributeValue string) { +func (m *metricMysqlLogOperations) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, logOperationsAttributeValue string) { if !m.settings.Enabled { return } @@ -641,7 +642,7 @@ func (m *metricMysqlLogOperations) recordDataPoint(start pdata.Timestamp, ts pda dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.LogOperations, pdata.NewValueString(logOperationsAttributeValue)) + dp.Attributes().Insert(A.LogOperations, pcommon.NewValueString(logOperationsAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -652,7 +653,7 @@ func (m *metricMysqlLogOperations) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMysqlLogOperations) emit(metrics pdata.MetricSlice) { +func (m *metricMysqlLogOperations) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -663,14 +664,14 @@ func (m *metricMysqlLogOperations) emit(metrics pdata.MetricSlice) { func newMetricMysqlLogOperations(settings MetricSettings) metricMysqlLogOperations { m := metricMysqlLogOperations{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMysqlOperations struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -680,13 +681,13 @@ func (m *metricMysqlOperations) init() { m.data.SetName("mysql.operations") m.data.SetDescription("The number of InndoDB operations.") m.data.SetUnit("1") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMysqlOperations) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, operationsAttributeValue string) { +func (m *metricMysqlOperations) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, operationsAttributeValue string) { if !m.settings.Enabled { return } @@ -694,7 +695,7 @@ func (m *metricMysqlOperations) recordDataPoint(start pdata.Timestamp, ts pdata. dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.Operations, pdata.NewValueString(operationsAttributeValue)) + dp.Attributes().Insert(A.Operations, pcommon.NewValueString(operationsAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -705,7 +706,7 @@ func (m *metricMysqlOperations) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMysqlOperations) emit(metrics pdata.MetricSlice) { +func (m *metricMysqlOperations) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -716,14 +717,14 @@ func (m *metricMysqlOperations) emit(metrics pdata.MetricSlice) { func newMetricMysqlOperations(settings MetricSettings) metricMysqlOperations { m := metricMysqlOperations{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMysqlPageOperations struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -733,13 +734,13 @@ func (m *metricMysqlPageOperations) init() { m.data.SetName("mysql.page_operations") m.data.SetDescription("The number of InndoDB page operations.") m.data.SetUnit("1") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMysqlPageOperations) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, pageOperationsAttributeValue string) { +func (m *metricMysqlPageOperations) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, pageOperationsAttributeValue string) { if !m.settings.Enabled { return } @@ -747,7 +748,7 @@ func (m *metricMysqlPageOperations) recordDataPoint(start pdata.Timestamp, ts pd dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.PageOperations, pdata.NewValueString(pageOperationsAttributeValue)) + dp.Attributes().Insert(A.PageOperations, pcommon.NewValueString(pageOperationsAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -758,7 +759,7 @@ func (m *metricMysqlPageOperations) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMysqlPageOperations) emit(metrics pdata.MetricSlice) { +func (m *metricMysqlPageOperations) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -769,14 +770,14 @@ func (m *metricMysqlPageOperations) emit(metrics pdata.MetricSlice) { func newMetricMysqlPageOperations(settings MetricSettings) metricMysqlPageOperations { m := metricMysqlPageOperations{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMysqlRowLocks struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -786,13 +787,13 @@ func (m *metricMysqlRowLocks) init() { m.data.SetName("mysql.row_locks") m.data.SetDescription("The number of InndoDB row locks.") m.data.SetUnit("1") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMysqlRowLocks) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, rowLocksAttributeValue string) { +func (m *metricMysqlRowLocks) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, rowLocksAttributeValue string) { if !m.settings.Enabled { return } @@ -800,7 +801,7 @@ func (m *metricMysqlRowLocks) recordDataPoint(start pdata.Timestamp, ts pdata.Ti dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.RowLocks, pdata.NewValueString(rowLocksAttributeValue)) + dp.Attributes().Insert(A.RowLocks, pcommon.NewValueString(rowLocksAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -811,7 +812,7 @@ func (m *metricMysqlRowLocks) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMysqlRowLocks) emit(metrics pdata.MetricSlice) { +func (m *metricMysqlRowLocks) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -822,14 +823,14 @@ func (m *metricMysqlRowLocks) emit(metrics pdata.MetricSlice) { func newMetricMysqlRowLocks(settings MetricSettings) metricMysqlRowLocks { m := metricMysqlRowLocks{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMysqlRowOperations struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -839,13 +840,13 @@ func (m *metricMysqlRowOperations) init() { m.data.SetName("mysql.row_operations") m.data.SetDescription("The number of InndoDB row operations.") m.data.SetUnit("1") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMysqlRowOperations) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, rowOperationsAttributeValue string) { +func (m *metricMysqlRowOperations) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, rowOperationsAttributeValue string) { if !m.settings.Enabled { return } @@ -853,7 +854,7 @@ func (m *metricMysqlRowOperations) recordDataPoint(start pdata.Timestamp, ts pda dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.RowOperations, pdata.NewValueString(rowOperationsAttributeValue)) + dp.Attributes().Insert(A.RowOperations, pcommon.NewValueString(rowOperationsAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -864,7 +865,7 @@ func (m *metricMysqlRowOperations) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMysqlRowOperations) emit(metrics pdata.MetricSlice) { +func (m *metricMysqlRowOperations) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -875,14 +876,14 @@ func (m *metricMysqlRowOperations) emit(metrics pdata.MetricSlice) { func newMetricMysqlRowOperations(settings MetricSettings) metricMysqlRowOperations { m := metricMysqlRowOperations{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMysqlSorts struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -892,13 +893,13 @@ func (m *metricMysqlSorts) init() { m.data.SetName("mysql.sorts") m.data.SetDescription("The number of MySQL sorts.") m.data.SetUnit("1") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMysqlSorts) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, sortsAttributeValue string) { +func (m *metricMysqlSorts) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, sortsAttributeValue string) { if !m.settings.Enabled { return } @@ -906,7 +907,7 @@ func (m *metricMysqlSorts) recordDataPoint(start pdata.Timestamp, ts pdata.Times dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.Sorts, pdata.NewValueString(sortsAttributeValue)) + dp.Attributes().Insert(A.Sorts, pcommon.NewValueString(sortsAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -917,7 +918,7 @@ func (m *metricMysqlSorts) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMysqlSorts) emit(metrics pdata.MetricSlice) { +func (m *metricMysqlSorts) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -928,14 +929,14 @@ func (m *metricMysqlSorts) emit(metrics pdata.MetricSlice) { func newMetricMysqlSorts(settings MetricSettings) metricMysqlSorts { m := metricMysqlSorts{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricMysqlThreads struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -945,13 +946,13 @@ func (m *metricMysqlThreads) init() { m.data.SetName("mysql.threads") m.data.SetDescription("The state of MySQL threads.") m.data.SetUnit("1") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricMysqlThreads) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, threadsAttributeValue string) { +func (m *metricMysqlThreads) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, threadsAttributeValue string) { if !m.settings.Enabled { return } @@ -959,7 +960,7 @@ func (m *metricMysqlThreads) recordDataPoint(start pdata.Timestamp, ts pdata.Tim dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.Threads, pdata.NewValueString(threadsAttributeValue)) + dp.Attributes().Insert(A.Threads, pcommon.NewValueString(threadsAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. 
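From the caller's side only the parameter and return types change: the MetricsBuilder migrated below still takes a pcommon.Timestamp per data point and hands back a pmetric.Metrics from Emit. A sketch of how a scraper typically drives it, assuming the mysql metadata package also exposes DefaultMetricsSettings (only the nginx variant is visible in these hunks); emitSample is a hypothetical helper:

package mysqlreceiver

import (
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"

	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/mysqlreceiver/internal/metadata"
)

// emitSample is a hypothetical helper that only shows the call pattern.
// DefaultMetricsSettings is assumed to exist for the mysql metadata package.
func emitSample() pmetric.Metrics {
	mb := metadata.NewMetricsBuilder(
		metadata.DefaultMetricsSettings(),
		metadata.WithStartTime(pcommon.NewTimestampFromTime(time.Now())),
	)

	now := pcommon.NewTimestampFromTime(time.Now())
	mb.RecordMysqlThreadsDataPoint(now, 8, "connected") // attribute value is illustrative
	mb.RecordMysqlSortsDataPoint(now, 120, "scan")      // attribute value is illustrative

	// Emit moves everything recorded so far into a fresh pmetric.Metrics.
	return mb.Emit()
}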
@@ -970,7 +971,7 @@ func (m *metricMysqlThreads) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricMysqlThreads) emit(metrics pdata.MetricSlice) { +func (m *metricMysqlThreads) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -981,7 +982,7 @@ func (m *metricMysqlThreads) emit(metrics pdata.MetricSlice) { func newMetricMysqlThreads(settings MetricSettings) metricMysqlThreads { m := metricMysqlThreads{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m @@ -990,10 +991,10 @@ func newMetricMysqlThreads(settings MetricSettings) metricMysqlThreads { // MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations // required to produce metric representation defined in metadata and user settings. type MetricsBuilder struct { - startTime pdata.Timestamp // start time that will be applied to all recorded data points. - metricsCapacity int // maximum observed number of metrics per resource. - resourceCapacity int // maximum observed number of resource attributes. - metricsBuffer pdata.Metrics // accumulates metrics data before emitting. + startTime pcommon.Timestamp // start time that will be applied to all recorded data points. + metricsCapacity int // maximum observed number of metrics per resource. + resourceCapacity int // maximum observed number of resource attributes. + metricsBuffer pmetric.Metrics // accumulates metrics data before emitting. metricMysqlBufferPoolDataPages metricMysqlBufferPoolDataPages metricMysqlBufferPoolLimit metricMysqlBufferPoolLimit metricMysqlBufferPoolOperations metricMysqlBufferPoolOperations @@ -1017,7 +1018,7 @@ type MetricsBuilder struct { type metricBuilderOption func(*MetricsBuilder) // WithStartTime sets startTime on the metrics builder. -func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { +func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption { return func(mb *MetricsBuilder) { mb.startTime = startTime } @@ -1025,8 +1026,8 @@ func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ - startTime: pdata.NewTimestampFromTime(time.Now()), - metricsBuffer: pdata.NewMetrics(), + startTime: pcommon.NewTimestampFromTime(time.Now()), + metricsBuffer: pmetric.NewMetrics(), metricMysqlBufferPoolDataPages: newMetricMysqlBufferPoolDataPages(settings.MysqlBufferPoolDataPages), metricMysqlBufferPoolLimit: newMetricMysqlBufferPoolLimit(settings.MysqlBufferPoolLimit), metricMysqlBufferPoolOperations: newMetricMysqlBufferPoolOperations(settings.MysqlBufferPoolOperations), @@ -1052,7 +1053,7 @@ func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) } // updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity. 
-func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { +func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) { if mb.metricsCapacity < rm.ScopeMetrics().At(0).Metrics().Len() { mb.metricsCapacity = rm.ScopeMetrics().At(0).Metrics().Len() } @@ -1062,14 +1063,14 @@ func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { } // ResourceOption applies changes to provided resource. -type ResourceOption func(pdata.Resource) +type ResourceOption func(pcommon.Resource) // EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for // recording another set of data points as part of another resource. This function can be helpful when one scraper // needs to emit metrics from several resources. Otherwise calling this function is not required, // just `Emit` function can be called instead. Resource attributes should be provided as ResourceOption arguments. func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { - rm := pdata.NewResourceMetrics() + rm := pmetric.NewResourceMetrics() rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity) for _, op := range ro { op(rm.Resource()) @@ -1103,102 +1104,102 @@ func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { // Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for // recording another set of metrics. This function will be responsible for applying all the transformations required to // produce metric representation defined in metadata and user settings, e.g. delta or cumulative. -func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pdata.Metrics { +func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pmetric.Metrics { mb.EmitForResource(ro...) - metrics := pdata.NewMetrics() + metrics := pmetric.NewMetrics() mb.metricsBuffer.MoveTo(metrics) return metrics } // RecordMysqlBufferPoolDataPagesDataPoint adds a data point to mysql.buffer_pool.data_pages metric. -func (mb *MetricsBuilder) RecordMysqlBufferPoolDataPagesDataPoint(ts pdata.Timestamp, val int64, bufferPoolDataAttributeValue string) { +func (mb *MetricsBuilder) RecordMysqlBufferPoolDataPagesDataPoint(ts pcommon.Timestamp, val int64, bufferPoolDataAttributeValue string) { mb.metricMysqlBufferPoolDataPages.recordDataPoint(mb.startTime, ts, val, bufferPoolDataAttributeValue) } // RecordMysqlBufferPoolLimitDataPoint adds a data point to mysql.buffer_pool.limit metric. -func (mb *MetricsBuilder) RecordMysqlBufferPoolLimitDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordMysqlBufferPoolLimitDataPoint(ts pcommon.Timestamp, val int64) { mb.metricMysqlBufferPoolLimit.recordDataPoint(mb.startTime, ts, val) } // RecordMysqlBufferPoolOperationsDataPoint adds a data point to mysql.buffer_pool.operations metric. -func (mb *MetricsBuilder) RecordMysqlBufferPoolOperationsDataPoint(ts pdata.Timestamp, val int64, bufferPoolOperationsAttributeValue string) { +func (mb *MetricsBuilder) RecordMysqlBufferPoolOperationsDataPoint(ts pcommon.Timestamp, val int64, bufferPoolOperationsAttributeValue string) { mb.metricMysqlBufferPoolOperations.recordDataPoint(mb.startTime, ts, val, bufferPoolOperationsAttributeValue) } // RecordMysqlBufferPoolPageFlushesDataPoint adds a data point to mysql.buffer_pool.page_flushes metric. 
-func (mb *MetricsBuilder) RecordMysqlBufferPoolPageFlushesDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordMysqlBufferPoolPageFlushesDataPoint(ts pcommon.Timestamp, val int64) { mb.metricMysqlBufferPoolPageFlushes.recordDataPoint(mb.startTime, ts, val) } // RecordMysqlBufferPoolPagesDataPoint adds a data point to mysql.buffer_pool.pages metric. -func (mb *MetricsBuilder) RecordMysqlBufferPoolPagesDataPoint(ts pdata.Timestamp, val int64, bufferPoolPagesAttributeValue string) { +func (mb *MetricsBuilder) RecordMysqlBufferPoolPagesDataPoint(ts pcommon.Timestamp, val int64, bufferPoolPagesAttributeValue string) { mb.metricMysqlBufferPoolPages.recordDataPoint(mb.startTime, ts, val, bufferPoolPagesAttributeValue) } // RecordMysqlBufferPoolUsageDataPoint adds a data point to mysql.buffer_pool.usage metric. -func (mb *MetricsBuilder) RecordMysqlBufferPoolUsageDataPoint(ts pdata.Timestamp, val int64, bufferPoolDataAttributeValue string) { +func (mb *MetricsBuilder) RecordMysqlBufferPoolUsageDataPoint(ts pcommon.Timestamp, val int64, bufferPoolDataAttributeValue string) { mb.metricMysqlBufferPoolUsage.recordDataPoint(mb.startTime, ts, val, bufferPoolDataAttributeValue) } // RecordMysqlCommandsDataPoint adds a data point to mysql.commands metric. -func (mb *MetricsBuilder) RecordMysqlCommandsDataPoint(ts pdata.Timestamp, val int64, commandAttributeValue string) { +func (mb *MetricsBuilder) RecordMysqlCommandsDataPoint(ts pcommon.Timestamp, val int64, commandAttributeValue string) { mb.metricMysqlCommands.recordDataPoint(mb.startTime, ts, val, commandAttributeValue) } // RecordMysqlDoubleWritesDataPoint adds a data point to mysql.double_writes metric. -func (mb *MetricsBuilder) RecordMysqlDoubleWritesDataPoint(ts pdata.Timestamp, val int64, doubleWritesAttributeValue string) { +func (mb *MetricsBuilder) RecordMysqlDoubleWritesDataPoint(ts pcommon.Timestamp, val int64, doubleWritesAttributeValue string) { mb.metricMysqlDoubleWrites.recordDataPoint(mb.startTime, ts, val, doubleWritesAttributeValue) } // RecordMysqlHandlersDataPoint adds a data point to mysql.handlers metric. -func (mb *MetricsBuilder) RecordMysqlHandlersDataPoint(ts pdata.Timestamp, val int64, handlerAttributeValue string) { +func (mb *MetricsBuilder) RecordMysqlHandlersDataPoint(ts pcommon.Timestamp, val int64, handlerAttributeValue string) { mb.metricMysqlHandlers.recordDataPoint(mb.startTime, ts, val, handlerAttributeValue) } // RecordMysqlLocksDataPoint adds a data point to mysql.locks metric. -func (mb *MetricsBuilder) RecordMysqlLocksDataPoint(ts pdata.Timestamp, val int64, locksAttributeValue string) { +func (mb *MetricsBuilder) RecordMysqlLocksDataPoint(ts pcommon.Timestamp, val int64, locksAttributeValue string) { mb.metricMysqlLocks.recordDataPoint(mb.startTime, ts, val, locksAttributeValue) } // RecordMysqlLogOperationsDataPoint adds a data point to mysql.log_operations metric. -func (mb *MetricsBuilder) RecordMysqlLogOperationsDataPoint(ts pdata.Timestamp, val int64, logOperationsAttributeValue string) { +func (mb *MetricsBuilder) RecordMysqlLogOperationsDataPoint(ts pcommon.Timestamp, val int64, logOperationsAttributeValue string) { mb.metricMysqlLogOperations.recordDataPoint(mb.startTime, ts, val, logOperationsAttributeValue) } // RecordMysqlOperationsDataPoint adds a data point to mysql.operations metric. 
-func (mb *MetricsBuilder) RecordMysqlOperationsDataPoint(ts pdata.Timestamp, val int64, operationsAttributeValue string) { +func (mb *MetricsBuilder) RecordMysqlOperationsDataPoint(ts pcommon.Timestamp, val int64, operationsAttributeValue string) { mb.metricMysqlOperations.recordDataPoint(mb.startTime, ts, val, operationsAttributeValue) } // RecordMysqlPageOperationsDataPoint adds a data point to mysql.page_operations metric. -func (mb *MetricsBuilder) RecordMysqlPageOperationsDataPoint(ts pdata.Timestamp, val int64, pageOperationsAttributeValue string) { +func (mb *MetricsBuilder) RecordMysqlPageOperationsDataPoint(ts pcommon.Timestamp, val int64, pageOperationsAttributeValue string) { mb.metricMysqlPageOperations.recordDataPoint(mb.startTime, ts, val, pageOperationsAttributeValue) } // RecordMysqlRowLocksDataPoint adds a data point to mysql.row_locks metric. -func (mb *MetricsBuilder) RecordMysqlRowLocksDataPoint(ts pdata.Timestamp, val int64, rowLocksAttributeValue string) { +func (mb *MetricsBuilder) RecordMysqlRowLocksDataPoint(ts pcommon.Timestamp, val int64, rowLocksAttributeValue string) { mb.metricMysqlRowLocks.recordDataPoint(mb.startTime, ts, val, rowLocksAttributeValue) } // RecordMysqlRowOperationsDataPoint adds a data point to mysql.row_operations metric. -func (mb *MetricsBuilder) RecordMysqlRowOperationsDataPoint(ts pdata.Timestamp, val int64, rowOperationsAttributeValue string) { +func (mb *MetricsBuilder) RecordMysqlRowOperationsDataPoint(ts pcommon.Timestamp, val int64, rowOperationsAttributeValue string) { mb.metricMysqlRowOperations.recordDataPoint(mb.startTime, ts, val, rowOperationsAttributeValue) } // RecordMysqlSortsDataPoint adds a data point to mysql.sorts metric. -func (mb *MetricsBuilder) RecordMysqlSortsDataPoint(ts pdata.Timestamp, val int64, sortsAttributeValue string) { +func (mb *MetricsBuilder) RecordMysqlSortsDataPoint(ts pcommon.Timestamp, val int64, sortsAttributeValue string) { mb.metricMysqlSorts.recordDataPoint(mb.startTime, ts, val, sortsAttributeValue) } // RecordMysqlThreadsDataPoint adds a data point to mysql.threads metric. -func (mb *MetricsBuilder) RecordMysqlThreadsDataPoint(ts pdata.Timestamp, val int64, threadsAttributeValue string) { +func (mb *MetricsBuilder) RecordMysqlThreadsDataPoint(ts pcommon.Timestamp, val int64, threadsAttributeValue string) { mb.metricMysqlThreads.recordDataPoint(mb.startTime, ts, val, threadsAttributeValue) } // Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted, // and metrics builder should update its startTime and reset it's internal state accordingly. func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { - mb.startTime = pdata.NewTimestampFromTime(time.Now()) + mb.startTime = pcommon.NewTimestampFromTime(time.Now()) for _, op := range options { op(mb) } diff --git a/receiver/mysqlreceiver/scraper.go b/receiver/mysqlreceiver/scraper.go index 2b8ffa7eefee..85d6fea19224 100644 --- a/receiver/mysqlreceiver/scraper.go +++ b/receiver/mysqlreceiver/scraper.go @@ -21,7 +21,8 @@ import ( "time" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/receiver/scrapererror" "go.uber.org/zap" @@ -68,12 +69,12 @@ func (m *mySQLScraper) shutdown(context.Context) error { } // scrape scrapes the mysql db metric stats, transforms them and labels them into a metric slices. 
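The scrape method migrated below now returns pmetric.Metrics rather than pdata.Metrics. A sketch of how such a payload can be read back under the new package split, with pmetric providing the resource/scope/metric tree and pcommon the timestamps, values, and attribute maps; the inspect helper is illustrative only:

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"
)

// inspect walks a pmetric.Metrics payload, such as the one scrape() or
// MetricsBuilder.Emit() returns, and prints every sum data point it finds.
func inspect(md pmetric.Metrics) {
	rms := md.ResourceMetrics()
	for i := 0; i < rms.Len(); i++ {
		sms := rms.At(i).ScopeMetrics()
		for j := 0; j < sms.Len(); j++ {
			ms := sms.At(j).Metrics()
			for k := 0; k < ms.Len(); k++ {
				metric := ms.At(k)
				if metric.DataType() != pmetric.MetricDataTypeSum {
					continue
				}
				dps := metric.Sum().DataPoints()
				for l := 0; l < dps.Len(); l++ {
					dp := dps.At(l)
					fmt.Printf("%s ts=%v val=%d\n", metric.Name(), dp.Timestamp(), dp.IntVal())
					dp.Attributes().Range(func(key string, v pcommon.Value) bool {
						fmt.Printf("  %s=%s\n", key, v.StringVal())
						return true
					})
				}
			}
		}
	}
}

func main() {
	inspect(pmetric.NewMetrics()) // empty payload: prints nothing
}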
-func (m *mySQLScraper) scrape(context.Context) (pdata.Metrics, error) { +func (m *mySQLScraper) scrape(context.Context) (pmetric.Metrics, error) { if m.sqlclient == nil { - return pdata.Metrics{}, errors.New("failed to connect to http client") + return pmetric.Metrics{}, errors.New("failed to connect to http client") } - now := pdata.NewTimestampFromTime(time.Now()) + now := pcommon.NewTimestampFromTime(time.Now()) // collect innodb metrics. innodbStats, innoErr := m.sqlclient.getInnodbStats() @@ -97,7 +98,7 @@ func (m *mySQLScraper) scrape(context.Context) (pdata.Metrics, error) { globalStats, err := m.sqlclient.getGlobalStats() if err != nil { m.logger.Error("Failed to fetch global stats", zap.Error(err)) - return pdata.Metrics{}, err + return pmetric.Metrics{}, err } m.recordDataPages(now, globalStats, errors) @@ -511,7 +512,7 @@ func (m *mySQLScraper) scrape(context.Context) (pdata.Metrics, error) { return m.mb.Emit(), errors.Combine() } -func (m *mySQLScraper) recordDataPages(now pdata.Timestamp, globalStats map[string]string, errors scrapererror.ScrapeErrors) { +func (m *mySQLScraper) recordDataPages(now pcommon.Timestamp, globalStats map[string]string, errors scrapererror.ScrapeErrors) { dirty, err := parseInt(globalStats["Innodb_buffer_pool_pages_dirty"]) if err != nil { errors.AddPartial(2, err) // we need dirty to calculate free, so 2 data points lost here @@ -527,7 +528,7 @@ func (m *mySQLScraper) recordDataPages(now pdata.Timestamp, globalStats map[stri m.mb.RecordMysqlBufferPoolDataPagesDataPoint(now, data-dirty, "clean") } -func (m *mySQLScraper) recordDataUsage(now pdata.Timestamp, globalStats map[string]string, errors scrapererror.ScrapeErrors) { +func (m *mySQLScraper) recordDataUsage(now pcommon.Timestamp, globalStats map[string]string, errors scrapererror.ScrapeErrors) { dirty, err := parseInt(globalStats["Innodb_buffer_pool_bytes_dirty"]) if err != nil { errors.AddPartial(2, err) // we need dirty to calculate free, so 2 data points lost here diff --git a/receiver/nginxreceiver/go.mod b/receiver/nginxreceiver/go.mod index 3933768e4154..643a0738c81c 100644 --- a/receiver/nginxreceiver/go.mod +++ b/receiver/nginxreceiver/go.mod @@ -8,8 +8,8 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/scrapertest v0.48.0 github.com/stretchr/testify v1.7.1 github.com/testcontainers/testcontainers-go v0.13.0 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/zap v1.21.0 golang.org/x/time v0.0.0-20210611083556-38a9dc6acbc6 // indirect @@ -19,7 +19,7 @@ require ( github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect github.com/Microsoft/go-winio v0.4.17 // indirect github.com/Microsoft/hcsshim v0.8.23 // indirect - github.com/cenkalti/backoff/v4 v4.1.2 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/containerd/cgroups v1.0.1 // indirect github.com/containerd/containerd v1.5.9 // indirect github.com/davecgh/go-spew v1.1.1 // indirect @@ -37,7 +37,7 @@ require ( github.com/golang/snappy v0.0.4 // indirect github.com/google/uuid v1.3.0 // indirect github.com/klauspost/compress v1.15.1 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect 
github.com/kr/text v0.2.0 // indirect github.com/magiconair/properties v1.8.6 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect @@ -55,7 +55,6 @@ require ( github.com/rogpeppe/go-internal v1.6.1 // indirect github.com/rs/cors v1.8.2 // indirect github.com/sirupsen/logrus v1.8.1 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect @@ -63,8 +62,8 @@ require ( go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect - golang.org/x/net v0.0.0-20211108170745-6635138e15ea // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect + golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 // indirect golang.org/x/text v0.3.7 // indirect google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect google.golang.org/grpc v1.45.0 // indirect @@ -75,3 +74,5 @@ require ( ) replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/scrapertest => ../../internal/scrapertest + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/receiver/nginxreceiver/go.sum b/receiver/nginxreceiver/go.sum index 584c8cbf88e5..9399c3448184 100644 --- a/receiver/nginxreceiver/go.sum +++ b/receiver/nginxreceiver/go.sum @@ -102,8 +102,9 @@ github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8n github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -405,8 +406,8 @@ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5m github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= +github.com/gorilla/mux v1.7.2 h1:zoNxOV7WjqXptQOVngLmcSQgXmgk4NMz1HibBchjl/I= github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/websocket 
v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= @@ -474,8 +475,8 @@ github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYs github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -676,8 +677,6 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasO github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= @@ -746,10 +745,10 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp 
v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0/go.mod h1:PFmBsWbldL1kiWZk9+0LBZz2brhByaGsvp6pRICMlPE= go.opentelemetry.io/otel v1.6.0/go.mod h1:bfJD2DZVw0LBxghOTlgnlI0CV3hLDu9XF/QKOUXMTQQ= @@ -759,7 +758,7 @@ go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1 go.opentelemetry.io/otel/metric v0.28.0/go.mod h1:TrzsfQAmQaB1PDcdhBauLMk7nyyg9hm+GoQq/ekE9Iw= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.0/go.mod h1:qs7BrU5cZ8dXQHBGxHMOxwME/27YH2qEp4/+tZLLwJE= go.opentelemetry.io/otel/trace v1.6.1/go.mod h1:RkFRM1m0puWIq10oxImnGEduNBzxiN7TXluRBtE+5j0= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= @@ -859,8 +858,9 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20211108170745-6635138e15ea h1:FosBMXtOc8Tp9Hbo4ltl1WJSrTVewZU8MPnTPY2HdH8= golang.org/x/net v0.0.0-20211108170745-6635138e15ea/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -951,8 +951,8 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/receiver/nginxreceiver/internal/metadata/generated_metrics_v2.go b/receiver/nginxreceiver/internal/metadata/generated_metrics_v2.go index 
1d633f6a858f..a27d7d74b805 100644 --- a/receiver/nginxreceiver/internal/metadata/generated_metrics_v2.go +++ b/receiver/nginxreceiver/internal/metadata/generated_metrics_v2.go @@ -5,7 +5,8 @@ package metadata import ( "time" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" ) // MetricSettings provides common settings for a particular metric. @@ -39,7 +40,7 @@ func DefaultMetricsSettings() MetricsSettings { } type metricNginxConnectionsAccepted struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -49,12 +50,12 @@ func (m *metricNginxConnectionsAccepted) init() { m.data.SetName("nginx.connections_accepted") m.data.SetDescription("The total number of accepted client connections") m.data.SetUnit("connections") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) } -func (m *metricNginxConnectionsAccepted) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricNginxConnectionsAccepted) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -72,7 +73,7 @@ func (m *metricNginxConnectionsAccepted) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricNginxConnectionsAccepted) emit(metrics pdata.MetricSlice) { +func (m *metricNginxConnectionsAccepted) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -83,14 +84,14 @@ func (m *metricNginxConnectionsAccepted) emit(metrics pdata.MetricSlice) { func newMetricNginxConnectionsAccepted(settings MetricSettings) metricNginxConnectionsAccepted { m := metricNginxConnectionsAccepted{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricNginxConnectionsCurrent struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -100,11 +101,11 @@ func (m *metricNginxConnectionsCurrent) init() { m.data.SetName("nginx.connections_current") m.data.SetDescription("The current number of nginx connections by state") m.data.SetUnit("connections") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricNginxConnectionsCurrent) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, stateAttributeValue string) { +func (m *metricNginxConnectionsCurrent) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, stateAttributeValue string) { if !m.settings.Enabled { return } @@ -112,7 +113,7 @@ func (m *metricNginxConnectionsCurrent) recordDataPoint(start pdata.Timestamp, t dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.State, pdata.NewValueString(stateAttributeValue)) + dp.Attributes().Insert(A.State, pcommon.NewValueString(stateAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -123,7 +124,7 @@ func (m *metricNginxConnectionsCurrent) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricNginxConnectionsCurrent) emit(metrics pdata.MetricSlice) { +func (m *metricNginxConnectionsCurrent) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -134,14 +135,14 @@ func (m *metricNginxConnectionsCurrent) emit(metrics pdata.MetricSlice) { func newMetricNginxConnectionsCurrent(settings MetricSettings) metricNginxConnectionsCurrent { m := metricNginxConnectionsCurrent{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricNginxConnectionsHandled struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -151,12 +152,12 @@ func (m *metricNginxConnectionsHandled) init() { m.data.SetName("nginx.connections_handled") m.data.SetDescription("The total number of handled connections. Generally, the parameter value is the same as nginx.connections_accepted unless some resource limits have been reached (for example, the worker_connections limit).") m.data.SetUnit("connections") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) } -func (m *metricNginxConnectionsHandled) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricNginxConnectionsHandled) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -174,7 +175,7 @@ func (m *metricNginxConnectionsHandled) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
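Not every metric in these receivers is a sum: nginx.connections_current above is a gauge, so the regenerated code sets pmetric.MetricDataTypeGauge and records through Gauge() rather than Sum(). A standalone sketch of that path, with an illustrative attribute value:

package main

import (
	"fmt"
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	// A gauge needs no monotonicity or aggregation temporality; only the
	// data-point accessor changes (Gauge() instead of Sum()).
	m := pmetric.NewMetric()
	m.SetName("nginx.connections_current")
	m.SetUnit("connections")
	m.SetDataType(pmetric.MetricDataTypeGauge)

	dp := m.Gauge().DataPoints().AppendEmpty()
	dp.SetTimestamp(pcommon.NewTimestampFromTime(time.Now()))
	dp.SetIntVal(12)
	dp.Attributes().Insert("state", pcommon.NewValueString("active")) // illustrative value

	fmt.Println(m.Name(), m.Gauge().DataPoints().Len()) // nginx.connections_current 1
}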
-func (m *metricNginxConnectionsHandled) emit(metrics pdata.MetricSlice) { +func (m *metricNginxConnectionsHandled) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -185,14 +186,14 @@ func (m *metricNginxConnectionsHandled) emit(metrics pdata.MetricSlice) { func newMetricNginxConnectionsHandled(settings MetricSettings) metricNginxConnectionsHandled { m := metricNginxConnectionsHandled{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricNginxRequests struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -202,12 +203,12 @@ func (m *metricNginxRequests) init() { m.data.SetName("nginx.requests") m.data.SetDescription("Total number of requests made to the server since it started") m.data.SetUnit("requests") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) } -func (m *metricNginxRequests) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricNginxRequests) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -225,7 +226,7 @@ func (m *metricNginxRequests) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricNginxRequests) emit(metrics pdata.MetricSlice) { +func (m *metricNginxRequests) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -236,7 +237,7 @@ func (m *metricNginxRequests) emit(metrics pdata.MetricSlice) { func newMetricNginxRequests(settings MetricSettings) metricNginxRequests { m := metricNginxRequests{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m @@ -245,10 +246,10 @@ func newMetricNginxRequests(settings MetricSettings) metricNginxRequests { // MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations // required to produce metric representation defined in metadata and user settings. type MetricsBuilder struct { - startTime pdata.Timestamp // start time that will be applied to all recorded data points. - metricsCapacity int // maximum observed number of metrics per resource. - resourceCapacity int // maximum observed number of resource attributes. - metricsBuffer pdata.Metrics // accumulates metrics data before emitting. + startTime pcommon.Timestamp // start time that will be applied to all recorded data points. + metricsCapacity int // maximum observed number of metrics per resource. + resourceCapacity int // maximum observed number of resource attributes. + metricsBuffer pmetric.Metrics // accumulates metrics data before emitting. 
metricNginxConnectionsAccepted metricNginxConnectionsAccepted metricNginxConnectionsCurrent metricNginxConnectionsCurrent metricNginxConnectionsHandled metricNginxConnectionsHandled @@ -259,7 +260,7 @@ type MetricsBuilder struct { type metricBuilderOption func(*MetricsBuilder) // WithStartTime sets startTime on the metrics builder. -func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { +func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption { return func(mb *MetricsBuilder) { mb.startTime = startTime } @@ -267,8 +268,8 @@ func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ - startTime: pdata.NewTimestampFromTime(time.Now()), - metricsBuffer: pdata.NewMetrics(), + startTime: pcommon.NewTimestampFromTime(time.Now()), + metricsBuffer: pmetric.NewMetrics(), metricNginxConnectionsAccepted: newMetricNginxConnectionsAccepted(settings.NginxConnectionsAccepted), metricNginxConnectionsCurrent: newMetricNginxConnectionsCurrent(settings.NginxConnectionsCurrent), metricNginxConnectionsHandled: newMetricNginxConnectionsHandled(settings.NginxConnectionsHandled), @@ -281,7 +282,7 @@ func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) } // updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity. -func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { +func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) { if mb.metricsCapacity < rm.ScopeMetrics().At(0).Metrics().Len() { mb.metricsCapacity = rm.ScopeMetrics().At(0).Metrics().Len() } @@ -291,14 +292,14 @@ func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { } // ResourceOption applies changes to provided resource. -type ResourceOption func(pdata.Resource) +type ResourceOption func(pcommon.Resource) // EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for // recording another set of data points as part of another resource. This function can be helpful when one scraper // needs to emit metrics from several resources. Otherwise calling this function is not required, // just `Emit` function can be called instead. Resource attributes should be provided as ResourceOption arguments. func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { - rm := pdata.NewResourceMetrics() + rm := pmetric.NewResourceMetrics() rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity) for _, op := range ro { op(rm.Resource()) @@ -319,37 +320,37 @@ func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { // Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for // recording another set of metrics. This function will be responsible for applying all the transformations required to // produce metric representation defined in metadata and user settings, e.g. delta or cumulative. -func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pdata.Metrics { +func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pmetric.Metrics { mb.EmitForResource(ro...) - metrics := pdata.NewMetrics() + metrics := pmetric.NewMetrics() mb.metricsBuffer.MoveTo(metrics) return metrics } // RecordNginxConnectionsAcceptedDataPoint adds a data point to nginx.connections_accepted metric. 
-func (mb *MetricsBuilder) RecordNginxConnectionsAcceptedDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordNginxConnectionsAcceptedDataPoint(ts pcommon.Timestamp, val int64) { mb.metricNginxConnectionsAccepted.recordDataPoint(mb.startTime, ts, val) } // RecordNginxConnectionsCurrentDataPoint adds a data point to nginx.connections_current metric. -func (mb *MetricsBuilder) RecordNginxConnectionsCurrentDataPoint(ts pdata.Timestamp, val int64, stateAttributeValue string) { +func (mb *MetricsBuilder) RecordNginxConnectionsCurrentDataPoint(ts pcommon.Timestamp, val int64, stateAttributeValue string) { mb.metricNginxConnectionsCurrent.recordDataPoint(mb.startTime, ts, val, stateAttributeValue) } // RecordNginxConnectionsHandledDataPoint adds a data point to nginx.connections_handled metric. -func (mb *MetricsBuilder) RecordNginxConnectionsHandledDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordNginxConnectionsHandledDataPoint(ts pcommon.Timestamp, val int64) { mb.metricNginxConnectionsHandled.recordDataPoint(mb.startTime, ts, val) } // RecordNginxRequestsDataPoint adds a data point to nginx.requests metric. -func (mb *MetricsBuilder) RecordNginxRequestsDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordNginxRequestsDataPoint(ts pcommon.Timestamp, val int64) { mb.metricNginxRequests.recordDataPoint(mb.startTime, ts, val) } // Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted, // and metrics builder should update its startTime and reset it's internal state accordingly. func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { - mb.startTime = pdata.NewTimestampFromTime(time.Now()) + mb.startTime = pcommon.NewTimestampFromTime(time.Now()) for _, op := range options { op(mb) } diff --git a/receiver/nginxreceiver/scraper.go b/receiver/nginxreceiver/scraper.go index 26ff768eaefd..a79dcd70d823 100644 --- a/receiver/nginxreceiver/scraper.go +++ b/receiver/nginxreceiver/scraper.go @@ -21,7 +21,8 @@ import ( "github.com/nginxinc/nginx-prometheus-exporter/client" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/nginxreceiver/internal/metadata" @@ -57,24 +58,24 @@ func (r *nginxScraper) start(_ context.Context, host component.Host) error { return nil } -func (r *nginxScraper) scrape(context.Context) (pdata.Metrics, error) { +func (r *nginxScraper) scrape(context.Context) (pmetric.Metrics, error) { // Init client in scrape method in case there are transient errors in the constructor. 
if r.client == nil { var err error r.client, err = client.NewNginxClient(r.httpClient, r.cfg.HTTPClientSettings.Endpoint) if err != nil { r.client = nil - return pdata.Metrics{}, err + return pmetric.Metrics{}, err } } stats, err := r.client.GetStubStats() if err != nil { r.settings.Logger.Error("Failed to fetch nginx stats", zap.Error(err)) - return pdata.Metrics{}, err + return pmetric.Metrics{}, err } - now := pdata.NewTimestampFromTime(time.Now()) + now := pcommon.NewTimestampFromTime(time.Now()) r.mb.RecordNginxRequestsDataPoint(now, stats.Requests) r.mb.RecordNginxConnectionsAcceptedDataPoint(now, stats.Connections.Accepted) diff --git a/receiver/opencensusreceiver/go.mod b/receiver/opencensusreceiver/go.mod index 55cad603ce40..248118cd656c 100644 --- a/receiver/opencensusreceiver/go.mod +++ b/receiver/opencensusreceiver/go.mod @@ -12,8 +12,7 @@ require ( github.com/rs/cors v1.8.2 github.com/soheilhy/cmux v0.1.5 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 go.opentelemetry.io/otel v1.6.3 go.opentelemetry.io/otel/trace v1.6.3 @@ -21,7 +20,10 @@ require ( google.golang.org/protobuf v1.28.0 ) -require go.opentelemetry.io/otel/sdk v1.6.3 +require ( + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 + go.opentelemetry.io/otel/sdk v1.6.3 +) require ( cloud.google.com/go/compute v1.5.0 // indirect @@ -33,19 +35,19 @@ require ( github.com/golang/protobuf v1.5.2 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/klauspost/compress v1.15.1 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/mostynb/go-grpc-compression v1.1.16 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opencensus.io v0.23.0 // indirect + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect go.uber.org/zap v1.21.0 // indirect - golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect golang.org/x/sys v0.0.0-20220209214540-3681064d5158 // indirect golang.org/x/text v0.3.7 // indirect @@ -62,3 +64,5 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/corei replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent => ../../internal/sharedcomponent replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus => ../../pkg/translator/opencensus + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/receiver/opencensusreceiver/go.sum b/receiver/opencensusreceiver/go.sum index 66728f42d523..04c1cc21f0af 100644 --- a/receiver/opencensusreceiver/go.sum +++ b/receiver/opencensusreceiver/go.sum @@ -69,7 
+69,7 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.3.0 h1:t/LhUZLVitR1Ow2YOnduCsavhwFUklBMoGVYUCqmCqk= github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -198,7 +198,6 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -237,8 +236,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/klauspost/compress v1.14.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= @@ -296,8 +295,6 @@ github.com/schollz/progressbar/v2 v2.13.2/go.mod h1:6YZjqdthH6SCZKv2rqGryrxPtfmR github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod 
h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -322,10 +319,12 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0/go.mod h1:SY9qHHUES6W3oZnO1H2W8NvsSovIoXRg/A1AH9px8+I= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= @@ -427,8 +426,9 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= diff --git a/receiver/opencensusreceiver/internal/octrace/opencensus.go b/receiver/opencensusreceiver/internal/octrace/opencensus.go index 1d460247c2d6..cf153ba886b8 100644 --- a/receiver/opencensusreceiver/internal/octrace/opencensus.go +++ b/receiver/opencensusreceiver/internal/octrace/opencensus.go @@ -26,8 +26,8 @@ import ( "go.opentelemetry.io/collector/component/componenterror" 
"go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/obsreport" + "go.opentelemetry.io/collector/pdata/ptrace" internaldata "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus" ) @@ -138,7 +138,7 @@ func (ocr *Receiver) processReceivedMsg( return lastNonNilNode, resource, err } -func (ocr *Receiver) sendToNextConsumer(longLivedRPCCtx context.Context, td pdata.Traces) error { +func (ocr *Receiver) sendToNextConsumer(longLivedRPCCtx context.Context, td ptrace.Traces) error { ctx := ocr.obsrecv.StartTracesOp(longLivedRPCCtx) err := ocr.nextConsumer.ConsumeTraces(ctx, td) diff --git a/receiver/opencensusreceiver/opencensus_test.go b/receiver/opencensusreceiver/opencensus_test.go index ff14ef826778..b7fa7d4825a1 100644 --- a/receiver/opencensusreceiver/opencensus_test.go +++ b/receiver/opencensusreceiver/opencensus_test.go @@ -44,8 +44,9 @@ import ( "go.opentelemetry.io/collector/config/configtls" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumertest" - "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/obsreport/obsreporttest" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials/insecure" @@ -662,7 +663,7 @@ func (esc *errOrSinkConsumer) Capabilities() consumer.Capabilities { } // ConsumeTraces stores traces to this sink. -func (esc *errOrSinkConsumer) ConsumeTraces(ctx context.Context, td pdata.Traces) error { +func (esc *errOrSinkConsumer) ConsumeTraces(ctx context.Context, td ptrace.Traces) error { esc.mu.Lock() defer esc.mu.Unlock() @@ -674,7 +675,7 @@ func (esc *errOrSinkConsumer) ConsumeTraces(ctx context.Context, td pdata.Traces } // ConsumeMetrics stores metrics to this sink. 
-func (esc *errOrSinkConsumer) ConsumeMetrics(ctx context.Context, md pdata.Metrics) error { +func (esc *errOrSinkConsumer) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error { esc.mu.Lock() defer esc.mu.Unlock() diff --git a/receiver/podmanreceiver/go.mod b/receiver/podmanreceiver/go.mod index 648df3e53682..e100d2f58157 100644 --- a/receiver/podmanreceiver/go.mod +++ b/receiver/podmanreceiver/go.mod @@ -4,8 +4,9 @@ go 1.17 require ( github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/zap v1.21.0 golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 ) @@ -14,7 +15,7 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/fsnotify/fsnotify v1.5.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/pretty v0.3.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect @@ -22,15 +23,16 @@ require ( github.com/pelletier/go-toml v1.9.4 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect + golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/receiver/podmanreceiver/go.sum b/receiver/podmanreceiver/go.sum index e31b2f6c8ffb..21c92f35e219 100644 --- a/receiver/podmanreceiver/go.sum +++ b/receiver/podmanreceiver/go.sum @@ -15,7 +15,7 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= @@ -70,7 +70,6 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.7 
h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -100,8 +99,8 @@ github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqx github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= @@ -148,8 +147,6 @@ github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTE github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -164,17 +161,19 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model 
v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -210,7 +209,7 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -233,8 +232,8 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/receiver/podmanreceiver/metrics.go b/receiver/podmanreceiver/metrics.go index b68d46a8dc41..ad1379ff2a2e 100644 --- a/receiver/podmanreceiver/metrics.go +++ b/receiver/podmanreceiver/metrics.go @@ -21,8 +21,9 @@ import ( "fmt" "time" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" ) type point struct { @@ -31,8 +32,8 @@ type point struct { attributes map[string]string } -func translateStatsToMetrics(stats *containerStats, ts time.Time, rm pdata.ResourceMetrics) { - pbts := pdata.NewTimestampFromTime(ts) +func translateStatsToMetrics(stats *containerStats, ts time.Time, rm pmetric.ResourceMetrics) { + pbts := pcommon.NewTimestampFromTime(ts) resource := rm.Resource() resource.Attributes().InsertString(conventions.AttributeContainerRuntime, "podman") @@ -46,23 +47,23 @@ func translateStatsToMetrics(stats *containerStats, ts time.Time, rm pdata.Resou appendMemoryMetrics(ms, stats, pbts) } -func appendMemoryMetrics(ms pdata.MetricSlice, stats *containerStats, ts pdata.Timestamp) { +func appendMemoryMetrics(ms pmetric.MetricSlice, stats *containerStats, ts pcommon.Timestamp) { gaugeI(ms, "memory.usage.limit", "By", []point{{intVal: stats.MemLimit}}, ts) gaugeI(ms, "memory.usage.total", "By", []point{{intVal: stats.MemUsage}}, ts) gaugeF(ms, "memory.percent", "1", []point{{doubleVal: stats.MemPerc}}, ts) } -func appendNetworkMetrics(ms pdata.MetricSlice, stats *containerStats, ts pdata.Timestamp) { +func appendNetworkMetrics(ms pmetric.MetricSlice, stats *containerStats, ts pcommon.Timestamp) { sum(ms, "network.io.usage.tx_bytes", "By", []point{{intVal: stats.NetInput}}, ts) sum(ms, "network.io.usage.rx_bytes", "By", []point{{intVal: stats.NetOutput}}, ts) } -func appendIOMetrics(ms pdata.MetricSlice, stats *containerStats, ts pdata.Timestamp) { +func appendIOMetrics(ms pmetric.MetricSlice, stats *containerStats, ts pcommon.Timestamp) { sum(ms, "blockio.io_service_bytes_recursive.write", "By", []point{{intVal: stats.BlockOutput}}, ts) sum(ms, "blockio.io_service_bytes_recursive.read", "By", []point{{intVal: stats.BlockInput}}, ts) } -func appendCPUMetrics(ms pdata.MetricSlice, stats *containerStats, ts pdata.Timestamp) { +func appendCPUMetrics(ms pmetric.MetricSlice, stats *containerStats, ts pcommon.Timestamp) { sum(ms, "cpu.usage.system", "ns", []point{{intVal: stats.CPUSystemNano}}, ts) sum(ms, "cpu.usage.total", "ns", []point{{intVal: stats.CPUNano}}, ts) gaugeF(ms, "cpu.percent", "1", []point{{doubleVal: stats.CPU}}, ts) @@ -79,20 +80,20 @@ func appendCPUMetrics(ms pdata.MetricSlice, stats *containerStats, ts pdata.Time sum(ms, "cpu.usage.percpu", "ns", points, ts) } -func initMetric(ms pdata.MetricSlice, name, unit string) pdata.Metric { +func initMetric(ms pmetric.MetricSlice, name, unit string) pmetric.Metric { m := ms.AppendEmpty() m.SetName(fmt.Sprintf("container.%s", name)) m.SetUnit(unit) return m } -func sum(ilm pdata.MetricSlice, metricName string, unit string, points []point, ts pdata.Timestamp) { +func sum(ilm pmetric.MetricSlice, metricName string, unit string, points []point, ts pcommon.Timestamp) { metric := initMetric(ilm, metricName, unit) - metric.SetDataType(pdata.MetricDataTypeSum) + metric.SetDataType(pmetric.MetricDataTypeSum) sum := metric.Sum() sum.SetIsMonotonic(true) - sum.SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + 
sum.SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) dataPoints := sum.DataPoints() @@ -104,15 +105,15 @@ func sum(ilm pdata.MetricSlice, metricName string, unit string, points []point, } } -func gauge(ms pdata.MetricSlice, metricName string, unit string) pdata.NumberDataPointSlice { +func gauge(ms pmetric.MetricSlice, metricName string, unit string) pmetric.NumberDataPointSlice { metric := initMetric(ms, metricName, unit) - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) gauge := metric.Gauge() return gauge.DataPoints() } -func gaugeI(ms pdata.MetricSlice, metricName string, unit string, points []point, ts pdata.Timestamp) { +func gaugeI(ms pmetric.MetricSlice, metricName string, unit string, points []point, ts pcommon.Timestamp) { dataPoints := gauge(ms, metricName, unit) for _, pt := range points { dataPoint := dataPoints.AppendEmpty() @@ -122,7 +123,7 @@ func gaugeI(ms pdata.MetricSlice, metricName string, unit string, points []point } } -func gaugeF(ms pdata.MetricSlice, metricName string, unit string, points []point, ts pdata.Timestamp) { +func gaugeF(ms pmetric.MetricSlice, metricName string, unit string, points []point, ts pcommon.Timestamp) { dataPoints := gauge(ms, metricName, unit) for _, pt := range points { dataPoint := dataPoints.AppendEmpty() @@ -132,7 +133,7 @@ func gaugeF(ms pdata.MetricSlice, metricName string, unit string, points []point } } -func setDataPointAttributes(dataPoint pdata.NumberDataPoint, attributes map[string]string) { +func setDataPointAttributes(dataPoint pmetric.NumberDataPoint, attributes map[string]string) { for k, v := range attributes { dataPoint.Attributes().InsertString(k, v) } diff --git a/receiver/podmanreceiver/metrics_test.go b/receiver/podmanreceiver/metrics_test.go index 9aa0a49d7582..ec074decdc50 100644 --- a/receiver/podmanreceiver/metrics_test.go +++ b/receiver/podmanreceiver/metrics_test.go @@ -23,18 +23,18 @@ import ( "time" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" ) func TestTranslateStatsToMetrics(t *testing.T) { ts := time.Now() stats := genContainerStats() - md := pdata.NewMetrics() + md := pmetric.NewMetrics() translateStatsToMetrics(stats, ts, md.ResourceMetrics().AppendEmpty()) assertStatsEqualToMetrics(t, stats, md) } -func assertStatsEqualToMetrics(t *testing.T, podmanStats *containerStats, md pdata.Metrics) { +func assertStatsEqualToMetrics(t *testing.T, podmanStats *containerStats, md pmetric.Metrics) { assert.Equal(t, md.ResourceMetrics().Len(), 1) rsm := md.ResourceMetrics().At(0) @@ -58,33 +58,33 @@ func assertStatsEqualToMetrics(t *testing.T, podmanStats *containerStats, md pda m := metrics.At(i) switch m.Name() { case "container.memory.usage.limit": - assertMetricEqual(t, m, pdata.MetricDataTypeGauge, []point{{intVal: podmanStats.MemLimit}}) + assertMetricEqual(t, m, pmetric.MetricDataTypeGauge, []point{{intVal: podmanStats.MemLimit}}) case "container.memory.usage.total": - assertMetricEqual(t, m, pdata.MetricDataTypeGauge, []point{{intVal: podmanStats.MemUsage}}) + assertMetricEqual(t, m, pmetric.MetricDataTypeGauge, []point{{intVal: podmanStats.MemUsage}}) case "container.memory.percent": - assertMetricEqual(t, m, pdata.MetricDataTypeGauge, []point{{doubleVal: podmanStats.MemPerc}}) + assertMetricEqual(t, m, pmetric.MetricDataTypeGauge, []point{{doubleVal: podmanStats.MemPerc}}) case "container.network.io.usage.tx_bytes": - assertMetricEqual(t, m, 
pdata.MetricDataTypeSum, []point{{intVal: podmanStats.NetInput}}) + assertMetricEqual(t, m, pmetric.MetricDataTypeSum, []point{{intVal: podmanStats.NetInput}}) case "container.network.io.usage.rx_bytes": - assertMetricEqual(t, m, pdata.MetricDataTypeSum, []point{{intVal: podmanStats.NetOutput}}) + assertMetricEqual(t, m, pmetric.MetricDataTypeSum, []point{{intVal: podmanStats.NetOutput}}) case "container.blockio.io_service_bytes_recursive.write": - assertMetricEqual(t, m, pdata.MetricDataTypeSum, []point{{intVal: podmanStats.BlockOutput}}) + assertMetricEqual(t, m, pmetric.MetricDataTypeSum, []point{{intVal: podmanStats.BlockOutput}}) case "container.blockio.io_service_bytes_recursive.read": - assertMetricEqual(t, m, pdata.MetricDataTypeSum, []point{{intVal: podmanStats.BlockInput}}) + assertMetricEqual(t, m, pmetric.MetricDataTypeSum, []point{{intVal: podmanStats.BlockInput}}) case "container.cpu.usage.system": - assertMetricEqual(t, m, pdata.MetricDataTypeSum, []point{{intVal: podmanStats.CPUSystemNano}}) + assertMetricEqual(t, m, pmetric.MetricDataTypeSum, []point{{intVal: podmanStats.CPUSystemNano}}) case "container.cpu.usage.total": - assertMetricEqual(t, m, pdata.MetricDataTypeSum, []point{{intVal: podmanStats.CPUNano}}) + assertMetricEqual(t, m, pmetric.MetricDataTypeSum, []point{{intVal: podmanStats.CPUNano}}) case "container.cpu.percent": - assertMetricEqual(t, m, pdata.MetricDataTypeGauge, []point{{doubleVal: podmanStats.CPU}}) + assertMetricEqual(t, m, pmetric.MetricDataTypeGauge, []point{{doubleVal: podmanStats.CPU}}) case "container.cpu.usage.percpu": points := make([]point, len(podmanStats.PerCPU)) for i, v := range podmanStats.PerCPU { points[i] = point{intVal: v, attributes: map[string]string{"core": fmt.Sprintf("cpu%d", i)}} } - assertMetricEqual(t, m, pdata.MetricDataTypeSum, points) + assertMetricEqual(t, m, pmetric.MetricDataTypeSum, points) default: t.Errorf(fmt.Sprintf("unexpected metric: %s", m.Name())) @@ -92,19 +92,19 @@ func assertStatsEqualToMetrics(t *testing.T, podmanStats *containerStats, md pda } } -func assertMetricEqual(t *testing.T, m pdata.Metric, dt pdata.MetricDataType, pts []point) { +func assertMetricEqual(t *testing.T, m pmetric.Metric, dt pmetric.MetricDataType, pts []point) { assert.Equal(t, m.DataType(), dt) switch dt { - case pdata.MetricDataTypeGauge: + case pmetric.MetricDataTypeGauge: assertPoints(t, m.Gauge().DataPoints(), pts) - case pdata.MetricDataTypeSum: + case pmetric.MetricDataTypeSum: assertPoints(t, m.Sum().DataPoints(), pts) default: t.Errorf("unexpected data type: %s", dt) } } -func assertPoints(t *testing.T, dpts pdata.NumberDataPointSlice, pts []point) { +func assertPoints(t *testing.T, dpts pmetric.NumberDataPointSlice, pts []point) { assert.Equal(t, dpts.Len(), len(pts)) for i, expected := range pts { got := dpts.At(i) diff --git a/receiver/podmanreceiver/receiver.go b/receiver/podmanreceiver/receiver.go index 9e2ce5cb5179..4abfd4bc518a 100644 --- a/receiver/podmanreceiver/receiver.go +++ b/receiver/podmanreceiver/receiver.go @@ -23,7 +23,7 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/receiver/scraperhelper" "go.uber.org/zap" ) @@ -72,16 +72,16 @@ func (r *receiver) start(context.Context, component.Host) error { return err } -func (r *receiver) scrape(ctx context.Context) (pdata.Metrics, error) { +func (r *receiver) scrape(ctx context.Context) 
(pmetric.Metrics, error) { var err error stats, err := r.client.stats(ctx) if err != nil { r.set.Logger.Error("error fetching stats", zap.Error(err)) - return pdata.Metrics{}, err + return pmetric.Metrics{}, err } - md := pdata.NewMetrics() + md := pmetric.NewMetrics() for i := range stats { translateStatsToMetrics(&stats[i], time.Now(), md.ResourceMetrics().AppendEmpty()) } diff --git a/receiver/podmanreceiver/receiver_test.go b/receiver/podmanreceiver/receiver_test.go index 542cfed6e16e..dece6f8b92d5 100644 --- a/receiver/podmanreceiver/receiver_test.go +++ b/receiver/podmanreceiver/receiver_test.go @@ -28,7 +28,7 @@ import ( "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumertest" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/receiver/scraperhelper" "go.uber.org/zap" ) @@ -105,13 +105,13 @@ func (c mockClient) ping(context.Context) error { return nil } -type mockConsumer chan pdata.Metrics +type mockConsumer chan pmetric.Metrics func (m mockConsumer) Capabilities() consumer.Capabilities { return consumer.Capabilities{} } -func (m mockConsumer) ConsumeMetrics(ctx context.Context, md pdata.Metrics) error { +func (m mockConsumer) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error { m <- md return nil } diff --git a/receiver/postgresqlreceiver/go.mod b/receiver/postgresqlreceiver/go.mod index feaefd26f081..b9b167786402 100644 --- a/receiver/postgresqlreceiver/go.mod +++ b/receiver/postgresqlreceiver/go.mod @@ -6,20 +6,21 @@ require ( github.com/lib/pq v1.10.5 github.com/open-telemetry/opentelemetry-collector-contrib/internal/scrapertest v0.48.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d go.uber.org/multierr v1.8.0 go.uber.org/zap v1.21.0 ) -require go.opentelemetry.io/collector/model v0.48.0 - -require github.com/testcontainers/testcontainers-go v0.13.0 +require ( + github.com/testcontainers/testcontainers-go v0.13.0 + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 +) require ( github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect github.com/Microsoft/go-winio v0.4.17 // indirect github.com/Microsoft/hcsshim v0.8.23 // indirect - github.com/cenkalti/backoff/v4 v4.1.2 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/containerd/cgroups v1.0.1 // indirect github.com/containerd/containerd v1.5.9 // indirect github.com/davecgh/go-spew v1.1.1 // indirect @@ -32,7 +33,7 @@ require ( github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.2 // indirect github.com/google/uuid v1.3.0 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/text v0.2.0 // indirect github.com/magiconair/properties v1.8.6 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect @@ -50,16 +51,14 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rogpeppe/go-internal v1.6.1 // indirect github.com/sirupsen/logrus v1.8.1 // indirect - github.com/spf13/cast v1.4.1 // indirect github.com/stretchr/objx v0.2.0 // indirect 
go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect - golang.org/x/net v0.0.0-20211108170745-6635138e15ea // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect - golang.org/x/text v0.3.7 // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect + golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 // indirect google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect google.golang.org/grpc v1.45.0 // indirect google.golang.org/protobuf v1.28.0 // indirect @@ -68,3 +67,5 @@ require ( ) replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/scrapertest => ../../internal/scrapertest + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/receiver/postgresqlreceiver/go.sum b/receiver/postgresqlreceiver/go.sum index b97885ec5e4b..467d0d3bf3dd 100644 --- a/receiver/postgresqlreceiver/go.sum +++ b/receiver/postgresqlreceiver/go.sum @@ -102,8 +102,9 @@ github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8n github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -401,8 +402,8 @@ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5m github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= +github.com/gorilla/mux v1.7.2 h1:zoNxOV7WjqXptQOVngLmcSQgXmgk4NMz1HibBchjl/I= github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= @@ -467,8 +468,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod 
h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -661,8 +662,6 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasO github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= @@ -732,15 +731,15 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 
h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= @@ -838,8 +837,9 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20211108170745-6635138e15ea h1:FosBMXtOc8Tp9Hbo4ltl1WJSrTVewZU8MPnTPY2HdH8= golang.org/x/net v0.0.0-20211108170745-6635138e15ea/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -929,8 +929,8 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -942,7 +942,6 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= diff --git a/receiver/postgresqlreceiver/internal/metadata/generated_metrics_v2.go b/receiver/postgresqlreceiver/internal/metadata/generated_metrics_v2.go index c1c9ec007cc6..dfe3623211b1 
100644 --- a/receiver/postgresqlreceiver/internal/metadata/generated_metrics_v2.go +++ b/receiver/postgresqlreceiver/internal/metadata/generated_metrics_v2.go @@ -5,7 +5,8 @@ package metadata import ( "time" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" ) // MetricSettings provides common settings for a particular metric. @@ -51,7 +52,7 @@ func DefaultMetricsSettings() MetricsSettings { } type metricPostgresqlBackends struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -61,13 +62,13 @@ func (m *metricPostgresqlBackends) init() { m.data.SetName("postgresql.backends") m.data.SetDescription("The number of backends.") m.data.SetUnit("1") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricPostgresqlBackends) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, databaseAttributeValue string) { +func (m *metricPostgresqlBackends) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, databaseAttributeValue string) { if !m.settings.Enabled { return } @@ -75,7 +76,7 @@ func (m *metricPostgresqlBackends) recordDataPoint(start pdata.Timestamp, ts pda dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.Database, pdata.NewValueString(databaseAttributeValue)) + dp.Attributes().Insert(A.Database, pcommon.NewValueString(databaseAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -86,7 +87,7 @@ func (m *metricPostgresqlBackends) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricPostgresqlBackends) emit(metrics pdata.MetricSlice) { +func (m *metricPostgresqlBackends) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -97,14 +98,14 @@ func (m *metricPostgresqlBackends) emit(metrics pdata.MetricSlice) { func newMetricPostgresqlBackends(settings MetricSettings) metricPostgresqlBackends { m := metricPostgresqlBackends{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricPostgresqlBlocksRead struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -114,13 +115,13 @@ func (m *metricPostgresqlBlocksRead) init() { m.data.SetName("postgresql.blocks_read") m.data.SetDescription("The number of blocks read.") m.data.SetUnit("1") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricPostgresqlBlocksRead) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, databaseAttributeValue string, tableAttributeValue string, sourceAttributeValue string) { +func (m *metricPostgresqlBlocksRead) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, databaseAttributeValue string, tableAttributeValue string, sourceAttributeValue string) { if !m.settings.Enabled { return } @@ -128,9 +129,9 @@ func (m *metricPostgresqlBlocksRead) recordDataPoint(start pdata.Timestamp, ts p dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.Database, pdata.NewValueString(databaseAttributeValue)) - dp.Attributes().Insert(A.Table, pdata.NewValueString(tableAttributeValue)) - dp.Attributes().Insert(A.Source, pdata.NewValueString(sourceAttributeValue)) + dp.Attributes().Insert(A.Database, pcommon.NewValueString(databaseAttributeValue)) + dp.Attributes().Insert(A.Table, pcommon.NewValueString(tableAttributeValue)) + dp.Attributes().Insert(A.Source, pcommon.NewValueString(sourceAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -141,7 +142,7 @@ func (m *metricPostgresqlBlocksRead) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricPostgresqlBlocksRead) emit(metrics pdata.MetricSlice) { +func (m *metricPostgresqlBlocksRead) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -152,14 +153,14 @@ func (m *metricPostgresqlBlocksRead) emit(metrics pdata.MetricSlice) { func newMetricPostgresqlBlocksRead(settings MetricSettings) metricPostgresqlBlocksRead { m := metricPostgresqlBlocksRead{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricPostgresqlCommits struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -169,13 +170,13 @@ func (m *metricPostgresqlCommits) init() { m.data.SetName("postgresql.commits") m.data.SetDescription("The number of commits.") m.data.SetUnit("1") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricPostgresqlCommits) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, databaseAttributeValue string) { +func (m *metricPostgresqlCommits) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, databaseAttributeValue string) { if !m.settings.Enabled { return } @@ -183,7 +184,7 @@ func (m *metricPostgresqlCommits) recordDataPoint(start pdata.Timestamp, ts pdat dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.Database, pdata.NewValueString(databaseAttributeValue)) + dp.Attributes().Insert(A.Database, pcommon.NewValueString(databaseAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -194,7 +195,7 @@ func (m *metricPostgresqlCommits) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricPostgresqlCommits) emit(metrics pdata.MetricSlice) { +func (m *metricPostgresqlCommits) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -205,14 +206,14 @@ func (m *metricPostgresqlCommits) emit(metrics pdata.MetricSlice) { func newMetricPostgresqlCommits(settings MetricSettings) metricPostgresqlCommits { m := metricPostgresqlCommits{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricPostgresqlDbSize struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -222,13 +223,13 @@ func (m *metricPostgresqlDbSize) init() { m.data.SetName("postgresql.db_size") m.data.SetDescription("The database disk usage.") m.data.SetUnit("By") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricPostgresqlDbSize) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, databaseAttributeValue string) { +func (m *metricPostgresqlDbSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, databaseAttributeValue string) { if !m.settings.Enabled { return } @@ -236,7 +237,7 @@ func (m *metricPostgresqlDbSize) recordDataPoint(start pdata.Timestamp, ts pdata dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.Database, pdata.NewValueString(databaseAttributeValue)) + dp.Attributes().Insert(A.Database, pcommon.NewValueString(databaseAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -247,7 +248,7 @@ func (m *metricPostgresqlDbSize) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricPostgresqlDbSize) emit(metrics pdata.MetricSlice) { +func (m *metricPostgresqlDbSize) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -258,14 +259,14 @@ func (m *metricPostgresqlDbSize) emit(metrics pdata.MetricSlice) { func newMetricPostgresqlDbSize(settings MetricSettings) metricPostgresqlDbSize { m := metricPostgresqlDbSize{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricPostgresqlOperations struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -275,13 +276,13 @@ func (m *metricPostgresqlOperations) init() { m.data.SetName("postgresql.operations") m.data.SetDescription("The number of db row operations.") m.data.SetUnit("1") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricPostgresqlOperations) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, databaseAttributeValue string, tableAttributeValue string, operationAttributeValue string) { +func (m *metricPostgresqlOperations) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, databaseAttributeValue string, tableAttributeValue string, operationAttributeValue string) { if !m.settings.Enabled { return } @@ -289,9 +290,9 @@ func (m *metricPostgresqlOperations) recordDataPoint(start pdata.Timestamp, ts p dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.Database, pdata.NewValueString(databaseAttributeValue)) - dp.Attributes().Insert(A.Table, pdata.NewValueString(tableAttributeValue)) - dp.Attributes().Insert(A.Operation, pdata.NewValueString(operationAttributeValue)) + dp.Attributes().Insert(A.Database, pcommon.NewValueString(databaseAttributeValue)) + dp.Attributes().Insert(A.Table, pcommon.NewValueString(tableAttributeValue)) + dp.Attributes().Insert(A.Operation, pcommon.NewValueString(operationAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -302,7 +303,7 @@ func (m *metricPostgresqlOperations) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricPostgresqlOperations) emit(metrics pdata.MetricSlice) { +func (m *metricPostgresqlOperations) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -313,14 +314,14 @@ func (m *metricPostgresqlOperations) emit(metrics pdata.MetricSlice) { func newMetricPostgresqlOperations(settings MetricSettings) metricPostgresqlOperations { m := metricPostgresqlOperations{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricPostgresqlRollbacks struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -330,13 +331,13 @@ func (m *metricPostgresqlRollbacks) init() { m.data.SetName("postgresql.rollbacks") m.data.SetDescription("The number of rollbacks.") m.data.SetUnit("1") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricPostgresqlRollbacks) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, databaseAttributeValue string) { +func (m *metricPostgresqlRollbacks) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, databaseAttributeValue string) { if !m.settings.Enabled { return } @@ -344,7 +345,7 @@ func (m *metricPostgresqlRollbacks) recordDataPoint(start pdata.Timestamp, ts pd dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.Database, pdata.NewValueString(databaseAttributeValue)) + dp.Attributes().Insert(A.Database, pcommon.NewValueString(databaseAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -355,7 +356,7 @@ func (m *metricPostgresqlRollbacks) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricPostgresqlRollbacks) emit(metrics pdata.MetricSlice) { +func (m *metricPostgresqlRollbacks) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -366,14 +367,14 @@ func (m *metricPostgresqlRollbacks) emit(metrics pdata.MetricSlice) { func newMetricPostgresqlRollbacks(settings MetricSettings) metricPostgresqlRollbacks { m := metricPostgresqlRollbacks{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricPostgresqlRows struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -383,13 +384,13 @@ func (m *metricPostgresqlRows) init() { m.data.SetName("postgresql.rows") m.data.SetDescription("The number of rows in the database.") m.data.SetUnit("1") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricPostgresqlRows) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, databaseAttributeValue string, tableAttributeValue string, stateAttributeValue string) { +func (m *metricPostgresqlRows) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, databaseAttributeValue string, tableAttributeValue string, stateAttributeValue string) { if !m.settings.Enabled { return } @@ -397,9 +398,9 @@ func (m *metricPostgresqlRows) recordDataPoint(start pdata.Timestamp, ts pdata.T dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.Database, pdata.NewValueString(databaseAttributeValue)) - dp.Attributes().Insert(A.Table, pdata.NewValueString(tableAttributeValue)) - dp.Attributes().Insert(A.State, pdata.NewValueString(stateAttributeValue)) + dp.Attributes().Insert(A.Database, pcommon.NewValueString(databaseAttributeValue)) + dp.Attributes().Insert(A.Table, pcommon.NewValueString(tableAttributeValue)) + dp.Attributes().Insert(A.State, pcommon.NewValueString(stateAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -410,7 +411,7 @@ func (m *metricPostgresqlRows) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricPostgresqlRows) emit(metrics pdata.MetricSlice) { +func (m *metricPostgresqlRows) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -421,7 +422,7 @@ func (m *metricPostgresqlRows) emit(metrics pdata.MetricSlice) { func newMetricPostgresqlRows(settings MetricSettings) metricPostgresqlRows { m := metricPostgresqlRows{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m @@ -430,10 +431,10 @@ func newMetricPostgresqlRows(settings MetricSettings) metricPostgresqlRows { // MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations // required to produce metric representation defined in metadata and user settings. type MetricsBuilder struct { - startTime pdata.Timestamp // start time that will be applied to all recorded data points. - metricsCapacity int // maximum observed number of metrics per resource. - resourceCapacity int // maximum observed number of resource attributes. - metricsBuffer pdata.Metrics // accumulates metrics data before emitting. + startTime pcommon.Timestamp // start time that will be applied to all recorded data points. + metricsCapacity int // maximum observed number of metrics per resource. + resourceCapacity int // maximum observed number of resource attributes. + metricsBuffer pmetric.Metrics // accumulates metrics data before emitting. 
metricPostgresqlBackends metricPostgresqlBackends metricPostgresqlBlocksRead metricPostgresqlBlocksRead metricPostgresqlCommits metricPostgresqlCommits @@ -447,7 +448,7 @@ type MetricsBuilder struct { type metricBuilderOption func(*MetricsBuilder) // WithStartTime sets startTime on the metrics builder. -func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { +func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption { return func(mb *MetricsBuilder) { mb.startTime = startTime } @@ -455,8 +456,8 @@ func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ - startTime: pdata.NewTimestampFromTime(time.Now()), - metricsBuffer: pdata.NewMetrics(), + startTime: pcommon.NewTimestampFromTime(time.Now()), + metricsBuffer: pmetric.NewMetrics(), metricPostgresqlBackends: newMetricPostgresqlBackends(settings.PostgresqlBackends), metricPostgresqlBlocksRead: newMetricPostgresqlBlocksRead(settings.PostgresqlBlocksRead), metricPostgresqlCommits: newMetricPostgresqlCommits(settings.PostgresqlCommits), @@ -472,7 +473,7 @@ func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) } // updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity. -func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { +func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) { if mb.metricsCapacity < rm.ScopeMetrics().At(0).Metrics().Len() { mb.metricsCapacity = rm.ScopeMetrics().At(0).Metrics().Len() } @@ -482,14 +483,14 @@ func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { } // ResourceOption applies changes to provided resource. -type ResourceOption func(pdata.Resource) +type ResourceOption func(pcommon.Resource) // EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for // recording another set of data points as part of another resource. This function can be helpful when one scraper // needs to emit metrics from several resources. Otherwise calling this function is not required, // just `Emit` function can be called instead. Resource attributes should be provided as ResourceOption arguments. func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { - rm := pdata.NewResourceMetrics() + rm := pmetric.NewResourceMetrics() rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity) for _, op := range ro { op(rm.Resource()) @@ -513,52 +514,52 @@ func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { // Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for // recording another set of metrics. This function will be responsible for applying all the transformations required to // produce metric representation defined in metadata and user settings, e.g. delta or cumulative. -func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pdata.Metrics { +func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pmetric.Metrics { mb.EmitForResource(ro...) - metrics := pdata.NewMetrics() + metrics := pmetric.NewMetrics() mb.metricsBuffer.MoveTo(metrics) return metrics } // RecordPostgresqlBackendsDataPoint adds a data point to postgresql.backends metric. 
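Note on the generated-builder hunks around this point: they only move the MetricsBuilder from the monolithic pdata package to pmetric (metric payloads) and pcommon (timestamps, values, resources); the logic is untouched. A minimal sketch, assuming nothing beyond the two new packages, of the structures the Record*/Emit methods below now operate on — the metric shape (postgresql.db_size, unit "By", non-monotonic cumulative sum, database attribute) is taken from the hunks above, while the concrete values are illustrative:

package example

import (
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"
)

// buildDbSize assembles one cumulative sum metric the way the generated
// builder does after the migration: pmetric owns the metric structures,
// pcommon owns timestamps and attribute values.
func buildDbSize() pmetric.Metrics {
	metrics := pmetric.NewMetrics()
	sm := metrics.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty()

	m := sm.Metrics().AppendEmpty()
	m.SetName("postgresql.db_size")
	m.SetUnit("By")
	m.SetDataType(pmetric.MetricDataTypeSum)
	m.Sum().SetIsMonotonic(false)
	m.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)

	dp := m.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(pcommon.NewTimestampFromTime(time.Now().Add(-time.Minute)))
	dp.SetTimestamp(pcommon.NewTimestampFromTime(time.Now()))
	dp.SetIntVal(1 << 20) // illustrative value, 1 MiB
	dp.Attributes().Insert("database", pcommon.NewValueString("postgres"))
	return metrics
}
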
-func (mb *MetricsBuilder) RecordPostgresqlBackendsDataPoint(ts pdata.Timestamp, val int64, databaseAttributeValue string) { +func (mb *MetricsBuilder) RecordPostgresqlBackendsDataPoint(ts pcommon.Timestamp, val int64, databaseAttributeValue string) { mb.metricPostgresqlBackends.recordDataPoint(mb.startTime, ts, val, databaseAttributeValue) } // RecordPostgresqlBlocksReadDataPoint adds a data point to postgresql.blocks_read metric. -func (mb *MetricsBuilder) RecordPostgresqlBlocksReadDataPoint(ts pdata.Timestamp, val int64, databaseAttributeValue string, tableAttributeValue string, sourceAttributeValue string) { +func (mb *MetricsBuilder) RecordPostgresqlBlocksReadDataPoint(ts pcommon.Timestamp, val int64, databaseAttributeValue string, tableAttributeValue string, sourceAttributeValue string) { mb.metricPostgresqlBlocksRead.recordDataPoint(mb.startTime, ts, val, databaseAttributeValue, tableAttributeValue, sourceAttributeValue) } // RecordPostgresqlCommitsDataPoint adds a data point to postgresql.commits metric. -func (mb *MetricsBuilder) RecordPostgresqlCommitsDataPoint(ts pdata.Timestamp, val int64, databaseAttributeValue string) { +func (mb *MetricsBuilder) RecordPostgresqlCommitsDataPoint(ts pcommon.Timestamp, val int64, databaseAttributeValue string) { mb.metricPostgresqlCommits.recordDataPoint(mb.startTime, ts, val, databaseAttributeValue) } // RecordPostgresqlDbSizeDataPoint adds a data point to postgresql.db_size metric. -func (mb *MetricsBuilder) RecordPostgresqlDbSizeDataPoint(ts pdata.Timestamp, val int64, databaseAttributeValue string) { +func (mb *MetricsBuilder) RecordPostgresqlDbSizeDataPoint(ts pcommon.Timestamp, val int64, databaseAttributeValue string) { mb.metricPostgresqlDbSize.recordDataPoint(mb.startTime, ts, val, databaseAttributeValue) } // RecordPostgresqlOperationsDataPoint adds a data point to postgresql.operations metric. -func (mb *MetricsBuilder) RecordPostgresqlOperationsDataPoint(ts pdata.Timestamp, val int64, databaseAttributeValue string, tableAttributeValue string, operationAttributeValue string) { +func (mb *MetricsBuilder) RecordPostgresqlOperationsDataPoint(ts pcommon.Timestamp, val int64, databaseAttributeValue string, tableAttributeValue string, operationAttributeValue string) { mb.metricPostgresqlOperations.recordDataPoint(mb.startTime, ts, val, databaseAttributeValue, tableAttributeValue, operationAttributeValue) } // RecordPostgresqlRollbacksDataPoint adds a data point to postgresql.rollbacks metric. -func (mb *MetricsBuilder) RecordPostgresqlRollbacksDataPoint(ts pdata.Timestamp, val int64, databaseAttributeValue string) { +func (mb *MetricsBuilder) RecordPostgresqlRollbacksDataPoint(ts pcommon.Timestamp, val int64, databaseAttributeValue string) { mb.metricPostgresqlRollbacks.recordDataPoint(mb.startTime, ts, val, databaseAttributeValue) } // RecordPostgresqlRowsDataPoint adds a data point to postgresql.rows metric. -func (mb *MetricsBuilder) RecordPostgresqlRowsDataPoint(ts pdata.Timestamp, val int64, databaseAttributeValue string, tableAttributeValue string, stateAttributeValue string) { +func (mb *MetricsBuilder) RecordPostgresqlRowsDataPoint(ts pcommon.Timestamp, val int64, databaseAttributeValue string, tableAttributeValue string, stateAttributeValue string) { mb.metricPostgresqlRows.recordDataPoint(mb.startTime, ts, val, databaseAttributeValue, tableAttributeValue, stateAttributeValue) } // Reset resets metrics builder to its initial state. 
It should be used when external metrics source is restarted, // and metrics builder should update its startTime and reset it's internal state accordingly. func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { - mb.startTime = pdata.NewTimestampFromTime(time.Now()) + mb.startTime = pcommon.NewTimestampFromTime(time.Now()) for _, op := range options { op(mb) } diff --git a/receiver/postgresqlreceiver/scraper.go b/receiver/postgresqlreceiver/scraper.go index 3ca50aa3b57d..b79e97009ac5 100644 --- a/receiver/postgresqlreceiver/scraper.go +++ b/receiver/postgresqlreceiver/scraper.go @@ -19,7 +19,8 @@ import ( "strconv" "time" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/receiver/scrapererror" "go.uber.org/zap" @@ -63,12 +64,12 @@ func newPostgreSQLScraper( } // scrape scrapes the metric stats, transforms them and attributes them into a metric slices. -func (p *postgreSQLScraper) scrape(ctx context.Context) (pdata.Metrics, error) { +func (p *postgreSQLScraper) scrape(ctx context.Context) (pmetric.Metrics, error) { databases := p.config.Databases listClient, err := p.clientFactory.getClient(p.config, "") if err != nil { p.logger.Error("Failed to initialize connection to postgres", zap.Error(err)) - return pdata.NewMetrics(), err + return pmetric.NewMetrics(), err } defer listClient.Close() @@ -76,12 +77,12 @@ func (p *postgreSQLScraper) scrape(ctx context.Context) (pdata.Metrics, error) { dbList, err := listClient.listDatabases(ctx) if err != nil { p.logger.Error("Failed to request list of databases from postgres", zap.Error(err)) - return pdata.NewMetrics(), err + return pmetric.NewMetrics(), err } databases = dbList } - now := pdata.NewTimestampFromTime(time.Now()) + now := pcommon.NewTimestampFromTime(time.Now()) var errors scrapererror.ScrapeErrors @@ -107,7 +108,7 @@ func (p *postgreSQLScraper) scrape(ctx context.Context) (pdata.Metrics, error) { func (p *postgreSQLScraper) collectBlockReads( ctx context.Context, - now pdata.Timestamp, + now pcommon.Timestamp, client client, errors scrapererror.ScrapeErrors, ) { @@ -135,7 +136,7 @@ func (p *postgreSQLScraper) collectBlockReads( func (p *postgreSQLScraper) collectDatabaseTableMetrics( ctx context.Context, - now pdata.Timestamp, + now pcommon.Timestamp, client client, errors scrapererror.ScrapeErrors, ) { @@ -182,7 +183,7 @@ func (p *postgreSQLScraper) collectDatabaseTableMetrics( func (p *postgreSQLScraper) collectCommitsAndRollbacks( ctx context.Context, - now pdata.Timestamp, + now pcommon.Timestamp, client client, databases []string, errors scrapererror.ScrapeErrors, @@ -218,7 +219,7 @@ func (p *postgreSQLScraper) collectCommitsAndRollbacks( func (p *postgreSQLScraper) collectDatabaseSize( ctx context.Context, - now pdata.Timestamp, + now pcommon.Timestamp, client client, databases []string, errors scrapererror.ScrapeErrors, @@ -247,7 +248,7 @@ func (p *postgreSQLScraper) collectDatabaseSize( func (p *postgreSQLScraper) collectBackends( ctx context.Context, - now pdata.Timestamp, + now pcommon.Timestamp, client client, databases []string, errors scrapererror.ScrapeErrors, diff --git a/receiver/postgresqlreceiver/scraper_test.go b/receiver/postgresqlreceiver/scraper_test.go index 9f53d7a86648..07813d87325c 100644 --- a/receiver/postgresqlreceiver/scraper_test.go +++ b/receiver/postgresqlreceiver/scraper_test.go @@ -22,7 +22,7 @@ import ( "github.com/stretchr/testify/mock" 
"github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/scrapertest" @@ -38,7 +38,7 @@ func TestUnsuccessfulScrape(t *testing.T) { actualMetrics, err := scraper.scrape(context.Background()) require.Error(t, err) - require.NoError(t, scrapertest.CompareMetrics(pdata.NewMetrics(), actualMetrics)) + require.NoError(t, scrapertest.CompareMetrics(pmetric.NewMetrics(), actualMetrics)) } func TestScraper(t *testing.T) { diff --git a/receiver/prometheusexecreceiver/go.mod b/receiver/prometheusexecreceiver/go.mod index aa56b7ae341a..df1c58c0af56 100644 --- a/receiver/prometheusexecreceiver/go.mod +++ b/receiver/prometheusexecreceiver/go.mod @@ -9,8 +9,8 @@ require ( github.com/prometheus/common v0.33.0 github.com/prometheus/prometheus v1.8.2-0.20220324155304-4d8bbfd4164c github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/zap v1.21.0 ) @@ -73,7 +73,7 @@ require ( github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b // indirect github.com/linode/linodego v1.3.0 // indirect github.com/mattn/go-colorable v0.1.12 // indirect @@ -97,9 +97,9 @@ require ( github.com/prometheus/procfs v0.7.3 // indirect github.com/scaleway/scaleway-sdk-go v1.0.0-beta.9 // indirect github.com/sirupsen/logrus v1.8.1 // indirect - github.com/spf13/cast v1.4.1 // indirect github.com/spf13/pflag v1.0.5 // indirect go.opencensus.io v0.23.0 // indirect + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect @@ -146,3 +146,5 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite => ../../pkg/translator/prometheusremotewrite replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver => ../../receiver/prometheusreceiver + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/receiver/prometheusexecreceiver/go.sum b/receiver/prometheusexecreceiver/go.sum index a84873c42132..791f6c1f072d 100644 --- a/receiver/prometheusexecreceiver/go.sum +++ b/receiver/prometheusexecreceiver/go.sum @@ -51,7 +51,7 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -contrib.go.opencensus.io/exporter/prometheus v0.4.0 
h1:0QfIkj9z/iVZgK31D9H9ohjjIDApI2GOPScCKwxedbs= +contrib.go.opencensus.io/exporter/prometheus v0.4.1 h1:oObVeKo2NxpdF/fIfrPsNj6K0Prg0R0mHM+uANlYMiM= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/AdaLogics/go-fuzz-headers v0.0.0-20210715213245-6c3934b029d8/go.mod h1:CzsSbkDixRphAF5hS6wbMKq0eI6ccJRb7/A0M6JBnwg= github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= @@ -187,8 +187,8 @@ github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8n github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= @@ -850,8 +850,8 @@ github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYs github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b h1:iNjcivnc6lhbvJA3LD622NPrUponluJrBWPIwGG/3Bg= github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -1167,8 +1167,6 @@ github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTd github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= 
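Note on the prometheusexecreceiver go.mod hunk above: it requires the new go.opentelemetry.io/collector/pdata module at the zero placeholder version (v0.0.0-00010101000000-000000000000) and pins it through a replace directive to the 2022-04-12 pseudo-version, presumably because pdata had not yet been tagged on its own; the old collector/model dependency becomes indirect. At the source level this corresponds to importing the split packages instead of model/pdata. A minimal, purely illustrative sketch of that import change (the function is not part of the patch):

package example

import (
	// before this patch the single import was:
	//   "go.opentelemetry.io/collector/model/pdata"
	"go.opentelemetry.io/collector/pdata/pcommon" // Timestamp, Value, Map, Resource
	"go.opentelemetry.io/collector/pdata/pmetric" // Metrics, Metric, MetricSlice, data-type constants
)

// newEmptyBatch touches the two packages most call sites now use.
func newEmptyBatch() (pmetric.Metrics, pcommon.Timestamp) {
	return pmetric.NewMetrics(), pcommon.Timestamp(0)
}
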
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= @@ -1287,10 +1285,12 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib v0.20.0 h1:ubFQUn0VCZ0gPwIoJfBJVpeBlyRMxu8Mm/huKWYd9p0= go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E= diff --git a/receiver/prometheusexecreceiver/receiver_test.go b/receiver/prometheusexecreceiver/receiver_test.go index f70cae747b60..cb17ae5a949f 100644 --- a/receiver/prometheusexecreceiver/receiver_test.go +++ b/receiver/prometheusexecreceiver/receiver_test.go @@ -29,7 +29,7 @@ import ( "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/consumer/consumertest" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/service/servicetest" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusexecreceiver/subprocessmanager" @@ -83,7 +83,7 @@ func endToEndScrapeTest(t *testing.T, receiverConfig config.Receiver, testName s assert.NoError(t, err, "Start() returned an error") defer func() { assert.NoError(t, wrapper.Shutdown(ctx)) }() - var metrics []pdata.Metrics + var metrics []pmetric.Metrics // Make sure two scrapes have been completed (this implies the process was started, scraped, restarted and finally scraped a second time) const waitFor = 30 * time.Second @@ -102,11 +102,11 @@ func endToEndScrapeTest(t *testing.T, receiverConfig config.Receiver, testName s // assertTwoUniqueValuesScraped iterates over the found metrics and returns true if it finds at least 2 unique metrics, meaning the endpoint // was successfully scraped twice AND the subprocess being handled was stopped and restarted -func assertTwoUniqueValuesScraped(t *testing.T, metricsSlice []pdata.Metrics) { +func assertTwoUniqueValuesScraped(t *testing.T, metricsSlice []pmetric.Metrics) { var value float64 for i := range metricsSlice { 
ms := metricsSlice[i].ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics() - var tempM pdata.Metric + var tempM pmetric.Metric ok := false for j := 0; j < ms.Len(); j++ { if ms.At(j).Name() == "timestamp_now" { @@ -116,7 +116,7 @@ func assertTwoUniqueValuesScraped(t *testing.T, metricsSlice []pdata.Metrics) { } } require.True(t, ok, "timestamp_now metric not found") - assert.Equal(t, pdata.MetricDataTypeGauge, tempM.DataType()) + assert.Equal(t, pmetric.MetricDataTypeGauge, tempM.DataType()) tempV := tempM.Gauge().DataPoints().At(0).DoubleVal() if i != 0 && tempV != value { return diff --git a/receiver/prometheusreceiver/go.mod b/receiver/prometheusreceiver/go.mod index eba6d5c9ddc5..8f1e86cafad1 100644 --- a/receiver/prometheusreceiver/go.mod +++ b/receiver/prometheusreceiver/go.mod @@ -10,8 +10,9 @@ require ( github.com/prometheus/common v0.33.0 github.com/prometheus/prometheus v1.8.2-0.20220324155304-4d8bbfd4164c github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/zap v1.21.0 google.golang.org/protobuf v1.28.0 gopkg.in/yaml.v2 v2.4.0 @@ -19,7 +20,7 @@ require ( require ( cloud.google.com/go/compute v1.5.0 // indirect - contrib.go.opencensus.io/exporter/prometheus v0.4.0 // indirect + contrib.go.opencensus.io/exporter/prometheus v0.4.1 // indirect github.com/Azure/azure-sdk-for-go v62.0.0+incompatible // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect github.com/Azure/go-autorest/autorest v0.11.24 // indirect @@ -34,7 +35,7 @@ require ( github.com/armon/go-metrics v0.3.10 // indirect github.com/aws/aws-sdk-go v1.43.32 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/cenkalti/backoff/v4 v4.1.2 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490 // indirect github.com/containerd/containerd v1.6.1 // indirect @@ -83,7 +84,7 @@ require ( github.com/jpillora/backoff v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/compress v1.15.1 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b // indirect github.com/linode/linodego v1.3.0 // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect @@ -115,7 +116,6 @@ require ( github.com/scaleway/scaleway-sdk-go v1.0.0-beta.9 // indirect github.com/shirou/gopsutil/v3 v3.22.3 // indirect github.com/sirupsen/logrus v1.8.1 // indirect - github.com/spf13/cast v1.4.1 // indirect github.com/spf13/cobra v1.4.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/tidwall/gjson v1.10.2 // indirect @@ -174,3 +174,5 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourceto replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus => ../../pkg/translator/opencensus replace 
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite => ../../pkg/translator/prometheusremotewrite + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/receiver/prometheusreceiver/go.sum b/receiver/prometheusreceiver/go.sum index ac6c74953be6..651a01c5aff6 100644 --- a/receiver/prometheusreceiver/go.sum +++ b/receiver/prometheusreceiver/go.sum @@ -51,8 +51,8 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -contrib.go.opencensus.io/exporter/prometheus v0.4.0 h1:0QfIkj9z/iVZgK31D9H9ohjjIDApI2GOPScCKwxedbs= -contrib.go.opencensus.io/exporter/prometheus v0.4.0/go.mod h1:o7cosnyfuPVK0tB8q0QmaQNhGnptITnPQB+z1+qeFB0= +contrib.go.opencensus.io/exporter/prometheus v0.4.1 h1:oObVeKo2NxpdF/fIfrPsNj6K0Prg0R0mHM+uANlYMiM= +contrib.go.opencensus.io/exporter/prometheus v0.4.1/go.mod h1:t9wvfitlUjGXG2IXAZsuFq26mDGid/JwCEXp+gTG/9U= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/AdaLogics/go-fuzz-headers v0.0.0-20210715213245-6c3934b029d8/go.mod h1:CzsSbkDixRphAF5hS6wbMKq0eI6ccJRb7/A0M6JBnwg= github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= @@ -189,8 +189,9 @@ github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8n github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= @@ -853,8 +854,8 @@ github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdY github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= 
+github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b h1:iNjcivnc6lhbvJA3LD622NPrUponluJrBWPIwGG/3Bg= github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -1081,6 +1082,7 @@ github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQ github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= @@ -1178,8 +1180,6 @@ github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTd github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= @@ -1307,10 +1307,12 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= 
+go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib v0.20.0 h1:ubFQUn0VCZ0gPwIoJfBJVpeBlyRMxu8Mm/huKWYd9p0= go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E= diff --git a/receiver/prometheusreceiver/internal/metricsutil_pdata_test.go b/receiver/prometheusreceiver/internal/metricsutil_pdata_test.go index 7fd3eb7fb7fa..5b9300099390 100644 --- a/receiver/prometheusreceiver/internal/metricsutil_pdata_test.go +++ b/receiver/prometheusreceiver/internal/metricsutil_pdata_test.go @@ -14,14 +14,17 @@ package internal // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver/internal" -import "go.opentelemetry.io/collector/model/pdata" +import ( + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" +) type kv struct { Key, Value string } -func distPointPdata(ts pdata.Timestamp, bounds []float64, counts []uint64) *pdata.HistogramDataPoint { - hdp := pdata.NewHistogramDataPoint() +func distPointPdata(ts pcommon.Timestamp, bounds []float64, counts []uint64) *pmetric.HistogramDataPoint { + hdp := pmetric.NewHistogramDataPoint() hdp.SetExplicitBounds(bounds) hdp.SetBucketCounts(counts) hdp.SetTimestamp(ts) @@ -39,12 +42,12 @@ func distPointPdata(ts pdata.Timestamp, bounds []float64, counts []uint64) *pdat return &hdp } -func cumulativeDistMetricPdata(name string, kvp []*kv, startTs pdata.Timestamp, points ...*pdata.HistogramDataPoint) *pdata.Metric { - metric := pdata.NewMetric() +func cumulativeDistMetricPdata(name string, kvp []*kv, startTs pcommon.Timestamp, points ...*pmetric.HistogramDataPoint) *pmetric.Metric { + metric := pmetric.NewMetric() metric.SetName(name) - metric.SetDataType(pdata.MetricDataTypeHistogram) + metric.SetDataType(pmetric.MetricDataTypeHistogram) histogram := metric.Histogram() - histogram.SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + histogram.SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) destPointL := histogram.DataPoints() // By default the AggregationTemporality is Cumulative until it'll be changed by the caller. @@ -60,18 +63,18 @@ func cumulativeDistMetricPdata(name string, kvp []*kv, startTs pdata.Timestamp, return &metric } -func doublePointPdata(ts pdata.Timestamp, value float64) *pdata.NumberDataPoint { - ndp := pdata.NewNumberDataPoint() +func doublePointPdata(ts pcommon.Timestamp, value float64) *pmetric.NumberDataPoint { + ndp := pmetric.NewNumberDataPoint() ndp.SetTimestamp(ts) ndp.SetDoubleVal(value) return &ndp } -func gaugeMetricPdata(name string, kvp []*kv, startTs pdata.Timestamp, points ...*pdata.NumberDataPoint) *pdata.Metric { - metric := pdata.NewMetric() +func gaugeMetricPdata(name string, kvp []*kv, startTs pcommon.Timestamp, points ...*pmetric.NumberDataPoint) *pmetric.Metric { + metric := pmetric.NewMetric() metric.SetName(name) - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) destPointL := metric.Gauge().DataPoints() for _, point := range points { @@ -86,8 +89,8 @@ func gaugeMetricPdata(name string, kvp []*kv, startTs pdata.Timestamp, points .. 
return &metric } -func summaryPointPdata(ts pdata.Timestamp, count uint64, sum float64, quantiles, values []float64) *pdata.SummaryDataPoint { - sdp := pdata.NewSummaryDataPoint() +func summaryPointPdata(ts pcommon.Timestamp, count uint64, sum float64, quantiles, values []float64) *pmetric.SummaryDataPoint { + sdp := pmetric.NewSummaryDataPoint() sdp.SetTimestamp(ts) sdp.SetCount(count) sdp.SetSum(sum) @@ -100,10 +103,10 @@ func summaryPointPdata(ts pdata.Timestamp, count uint64, sum float64, quantiles, return &sdp } -func summaryMetricPdata(name string, kvp []*kv, startTs pdata.Timestamp, points ...*pdata.SummaryDataPoint) *pdata.Metric { - metric := pdata.NewMetric() +func summaryMetricPdata(name string, kvp []*kv, startTs pcommon.Timestamp, points ...*pmetric.SummaryDataPoint) *pmetric.Metric { + metric := pmetric.NewMetric() metric.SetName(name) - metric.SetDataType(pdata.MetricDataTypeSummary) + metric.SetDataType(pmetric.MetricDataTypeSummary) destPointL := metric.Summary().DataPoints() for _, point := range points { @@ -118,12 +121,12 @@ func summaryMetricPdata(name string, kvp []*kv, startTs pdata.Timestamp, points return &metric } -func sumMetricPdata(name string, kvp []*kv, startTs pdata.Timestamp, points ...*pdata.NumberDataPoint) *pdata.Metric { - metric := pdata.NewMetric() +func sumMetricPdata(name string, kvp []*kv, startTs pcommon.Timestamp, points ...*pmetric.NumberDataPoint) *pmetric.Metric { + metric := pmetric.NewMetric() metric.SetName(name) - metric.SetDataType(pdata.MetricDataTypeSum) + metric.SetDataType(pmetric.MetricDataTypeSum) sum := metric.Sum() - sum.SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + sum.SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) sum.SetIsMonotonic(true) destPointL := sum.DataPoints() diff --git a/receiver/prometheusreceiver/internal/otlp_metricfamily.go b/receiver/prometheusreceiver/internal/otlp_metricfamily.go index 6a4473e1cd5e..919d2b1ed21e 100644 --- a/receiver/prometheusreceiver/internal/otlp_metricfamily.go +++ b/receiver/prometheusreceiver/internal/otlp_metricfamily.go @@ -23,12 +23,13 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/scrape" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" ) type metricFamilyPdata struct { - mtype pdata.MetricDataType + mtype pmetric.MetricDataType groups map[string]*metricGroupPdata name string mc MetadataCache @@ -54,12 +55,12 @@ type metricGroupPdata struct { complexValue []*dataPoint } -var pdataStaleFlags = pdata.NewMetricDataPointFlags(pdata.MetricDataPointFlagNoRecordedValue) +var pdataStaleFlags = pmetric.NewMetricDataPointFlags(pmetric.MetricDataPointFlagNoRecordedValue) func newMetricFamilyPdata(metricName string, mc MetadataCache, logger *zap.Logger) *metricFamilyPdata { metadata, familyName := metadataForMetric(metricName, mc) mtype := convToPdataMetricType(metadata.Type) - if mtype == pdata.MetricDataTypeNone { + if mtype == pmetric.MetricDataTypeNone { logger.Debug(fmt.Sprintf("Unknown-typed metric : %s %+v", metricName, metadata)) } @@ -118,7 +119,7 @@ func (mg *metricGroupPdata) sortPoints() { }) } -func (mg *metricGroupPdata) toDistributionPoint(orderedLabelKeys []string, dest *pdata.HistogramDataPointSlice) bool { +func (mg *metricGroupPdata) toDistributionPoint(orderedLabelKeys []string, dest 
*pmetric.HistogramDataPointSlice) bool { if !mg.hasCount || len(mg.complexValue) == 0 { return false } @@ -172,12 +173,12 @@ func (mg *metricGroupPdata) toDistributionPoint(orderedLabelKeys []string, dest return true } -func pdataTimestampFromMs(timeAtMs int64) pdata.Timestamp { +func pdataTimestampFromMs(timeAtMs int64) pcommon.Timestamp { secs, ns := timeAtMs/1e3, (timeAtMs%1e3)*1e6 - return pdata.NewTimestampFromTime(time.Unix(secs, ns)) + return pcommon.NewTimestampFromTime(time.Unix(secs, ns)) } -func (mg *metricGroupPdata) toSummaryPoint(orderedLabelKeys []string, dest *pdata.SummaryDataPointSlice) bool { +func (mg *metricGroupPdata) toSummaryPoint(orderedLabelKeys []string, dest *pmetric.SummaryDataPointSlice) bool { // expecting count to be provided, however, in the following two cases, they can be missed. // 1. data is corrupted // 2. ignored by startValue evaluation @@ -223,8 +224,8 @@ func (mg *metricGroupPdata) toSummaryPoint(orderedLabelKeys []string, dest *pdat return true } -func (mg *metricGroupPdata) toNumberDataPoint(orderedLabelKeys []string, dest *pdata.NumberDataPointSlice) bool { - var startTsNanos pdata.Timestamp +func (mg *metricGroupPdata) toNumberDataPoint(orderedLabelKeys []string, dest *pmetric.NumberDataPointSlice) bool { + var startTsNanos pcommon.Timestamp tsNanos := pdataTimestampFromMs(mg.ts) // gauge/undefined types have no start time. if mg.family.isCumulativeTypePdata() { @@ -244,7 +245,7 @@ func (mg *metricGroupPdata) toNumberDataPoint(orderedLabelKeys []string, dest *p return true } -func populateAttributesPdata(orderedKeys []string, ls labels.Labels, dest pdata.Map) { +func populateAttributesPdata(orderedKeys []string, ls labels.Labels, dest pcommon.Map) { src := ls.Map() for _, key := range orderedKeys { if src[key] == "" { @@ -259,9 +260,9 @@ func populateAttributesPdata(orderedKeys []string, ls labels.Labels, dest pdata. 
var _ = (*metricFamilyPdata)(nil).updateLabelKeys func (mf *metricFamilyPdata) isCumulativeTypePdata() bool { - return mf.mtype == pdata.MetricDataTypeSum || - mf.mtype == pdata.MetricDataTypeHistogram || - mf.mtype == pdata.MetricDataTypeSummary + return mf.mtype == pmetric.MetricDataTypeSum || + mf.mtype == pmetric.MetricDataTypeHistogram || + mf.mtype == pmetric.MetricDataTypeSummary } func (mf *metricFamilyPdata) loadMetricGroupOrCreate(groupKey string, ls labels.Labels, ts int64) *metricGroupPdata { @@ -284,7 +285,7 @@ func (mf *metricFamilyPdata) Add(metricName string, ls labels.Labels, t int64, v groupKey := mf.getGroupKey(ls) mg := mf.loadMetricGroupOrCreate(groupKey, ls, t) switch mf.mtype { - case pdata.MetricDataTypeHistogram, pdata.MetricDataTypeSummary: + case pmetric.MetricDataTypeHistogram, pmetric.MetricDataTypeSummary: switch { case strings.HasSuffix(metricName, metricsSuffixSum): // always use the timestamp from sum (count is ok too), because the startTs from quantiles won't be reliable @@ -319,8 +320,8 @@ func (mf *metricFamilyPdata) getGroups() []*metricGroupPdata { return groups } -func (mf *metricFamilyPdata) ToMetricPdata(metrics *pdata.MetricSlice) (int, int) { - metric := pdata.NewMetric() +func (mf *metricFamilyPdata) ToMetricPdata(metrics *pmetric.MetricSlice) (int, int) { + metric := pmetric.NewMetric() metric.SetDataType(mf.mtype) metric.SetName(mf.name) metric.SetDescription(mf.metadata.Help) @@ -329,9 +330,9 @@ func (mf *metricFamilyPdata) ToMetricPdata(metrics *pdata.MetricSlice) (int, int pointCount := 0 switch mf.mtype { - case pdata.MetricDataTypeHistogram: + case pmetric.MetricDataTypeHistogram: histogram := metric.Histogram() - histogram.SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + histogram.SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) hdpL := histogram.DataPoints() for _, mg := range mf.getGroups() { if !mg.toDistributionPoint(mf.labelKeysOrdered, &hdpL) { @@ -340,7 +341,7 @@ func (mf *metricFamilyPdata) ToMetricPdata(metrics *pdata.MetricSlice) (int, int } pointCount = hdpL.Len() - case pdata.MetricDataTypeSummary: + case pmetric.MetricDataTypeSummary: summary := metric.Summary() sdpL := summary.DataPoints() for _, mg := range mf.getGroups() { @@ -350,9 +351,9 @@ func (mf *metricFamilyPdata) ToMetricPdata(metrics *pdata.MetricSlice) (int, int } pointCount = sdpL.Len() - case pdata.MetricDataTypeSum: + case pmetric.MetricDataTypeSum: sum := metric.Sum() - sum.SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + sum.SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) sum.SetIsMonotonic(true) sdpL := sum.DataPoints() for _, mg := range mf.getGroups() { @@ -363,7 +364,7 @@ func (mf *metricFamilyPdata) ToMetricPdata(metrics *pdata.MetricSlice) (int, int pointCount = sdpL.Len() default: // Everything else should be set to a Gauge. 
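Note on ToMetricPdata, whose type switch continues just below: the patch only renames its constants from pdata.* to pmetric.*; the dispatch itself is unchanged. A condensed sketch of that shape under pmetric, assuming a finalize helper that is not part of the patch:

package example

import "go.opentelemetry.io/collector/pdata/pmetric"

// finalize mirrors the dispatch in ToMetricPdata: histograms and sums get
// cumulative temporality (sums also monotonic), summaries pass through, and
// anything unrecognised is set to a gauge. It returns the point count the
// caller would report.
func finalize(metric pmetric.Metric) int {
	switch metric.DataType() {
	case pmetric.MetricDataTypeHistogram:
		h := metric.Histogram()
		h.SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
		return h.DataPoints().Len()
	case pmetric.MetricDataTypeSummary:
		return metric.Summary().DataPoints().Len()
	case pmetric.MetricDataTypeSum:
		s := metric.Sum()
		s.SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
		s.SetIsMonotonic(true)
		return s.DataPoints().Len()
	default:
		metric.SetDataType(pmetric.MetricDataTypeGauge)
		return metric.Gauge().DataPoints().Len()
	}
}
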
- metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) gauge := metric.Gauge() gdpL := gauge.DataPoints() for _, mg := range mf.getGroups() { diff --git a/receiver/prometheusreceiver/internal/otlp_metricfamily_test.go b/receiver/prometheusreceiver/internal/otlp_metricfamily_test.go index d416afb0da3e..844c3ee983bd 100644 --- a/receiver/prometheusreceiver/internal/otlp_metricfamily_test.go +++ b/receiver/prometheusreceiver/internal/otlp_metricfamily_test.go @@ -24,7 +24,8 @@ import ( "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/scrape" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" ) @@ -101,7 +102,7 @@ func TestMetricGroupData_toDistributionUnitTest(t *testing.T) { metricName string labels labels.Labels scrapes []*scrape - want func() pdata.HistogramDataPoint + want func() pmetric.HistogramDataPoint intervalStartTimeMs int64 }{ { @@ -114,14 +115,14 @@ func TestMetricGroupData_toDistributionUnitTest(t *testing.T) { {at: 11, value: 1004.78, metric: "histogram_sum"}, {at: 13, value: 33.7, metric: "value"}, }, - want: func() pdata.HistogramDataPoint { - point := pdata.NewHistogramDataPoint() + want: func() pmetric.HistogramDataPoint { + point := pmetric.NewHistogramDataPoint() point.SetCount(10) point.SetSum(1004.78) - point.SetTimestamp(pdata.Timestamp(11 * time.Millisecond)) // the time in milliseconds -> nanoseconds. + point.SetTimestamp(pcommon.Timestamp(11 * time.Millisecond)) // the time in milliseconds -> nanoseconds. point.SetBucketCounts([]uint64{33}) point.SetExplicitBounds([]float64{}) - point.SetStartTimestamp(pdata.Timestamp(11 * time.Millisecond)) // the time in milliseconds -> nanoseconds. + point.SetStartTimestamp(pcommon.Timestamp(11 * time.Millisecond)) // the time in milliseconds -> nanoseconds. attributes := point.Attributes() attributes.InsertString("a", "A") attributes.InsertString("b", "B") @@ -138,13 +139,13 @@ func TestMetricGroupData_toDistributionUnitTest(t *testing.T) { {at: 11, value: math.Float64frombits(value.StaleNaN), metric: "histogram_stale_sum"}, {at: 13, value: math.Float64frombits(value.StaleNaN), metric: "value"}, }, - want: func() pdata.HistogramDataPoint { - point := pdata.NewHistogramDataPoint() - point.SetTimestamp(pdata.Timestamp(11 * time.Millisecond)) // the time in milliseconds -> nanoseconds. + want: func() pmetric.HistogramDataPoint { + point := pmetric.NewHistogramDataPoint() + point.SetTimestamp(pcommon.Timestamp(11 * time.Millisecond)) // the time in milliseconds -> nanoseconds. point.SetFlags(pdataStaleFlags) point.SetBucketCounts([]uint64{0}) point.SetExplicitBounds([]float64{}) - point.SetStartTimestamp(pdata.Timestamp(11 * time.Millisecond)) // the time in milliseconds -> nanoseconds. + point.SetStartTimestamp(pcommon.Timestamp(11 * time.Millisecond)) // the time in milliseconds -> nanoseconds. 
attributes := point.Attributes() attributes.InsertString("a", "A") attributes.InsertString("b", "B") @@ -165,7 +166,7 @@ func TestMetricGroupData_toDistributionUnitTest(t *testing.T) { groupKey := mp.getGroupKey(tt.labels.Copy()) require.NotNil(t, mp.groups[groupKey], "Expecting the groupKey to have a value given key:: "+groupKey) - sl := pdata.NewMetricSlice() + sl := pmetric.NewMetricSlice() mp.ToMetricPdata(&sl) require.Equal(t, 1, sl.Len(), "Exactly one metric expected") @@ -196,7 +197,7 @@ func TestMetricGroupData_toSummaryUnitTest(t *testing.T) { tests := []struct { name string labelsScrapes []*labelsScrapes - want func() pdata.SummaryDataPoint + want func() pmetric.SummaryDataPoint }{ { name: "summary", @@ -252,8 +253,8 @@ func TestMetricGroupData_toSummaryUnitTest(t *testing.T) { }, }, }, - want: func() pdata.SummaryDataPoint { - point := pdata.NewSummaryDataPoint() + want: func() pmetric.SummaryDataPoint { + point := pmetric.NewSummaryDataPoint() point.SetCount(10) point.SetSum(15) qtL := point.QuantileValues() @@ -272,8 +273,8 @@ func TestMetricGroupData_toSummaryUnitTest(t *testing.T) { qn99 := qtL.AppendEmpty() qn99.SetQuantile(.99) qn99.SetValue(82) - point.SetTimestamp(pdata.Timestamp(14 * time.Millisecond)) // the time in milliseconds -> nanoseconds. - point.SetStartTimestamp(pdata.Timestamp(14 * time.Millisecond)) // the time in milliseconds -> nanoseconds + point.SetTimestamp(pcommon.Timestamp(14 * time.Millisecond)) // the time in milliseconds -> nanoseconds. + point.SetStartTimestamp(pcommon.Timestamp(14 * time.Millisecond)) // the time in milliseconds -> nanoseconds attributes := point.Attributes() attributes.InsertString("a", "A") attributes.InsertString("b", "B") @@ -334,8 +335,8 @@ func TestMetricGroupData_toSummaryUnitTest(t *testing.T) { }, }, }, - want: func() pdata.SummaryDataPoint { - point := pdata.NewSummaryDataPoint() + want: func() pmetric.SummaryDataPoint { + point := pmetric.NewSummaryDataPoint() qtL := point.QuantileValues() qn0 := qtL.AppendEmpty() point.SetFlags(pdataStaleFlags) @@ -353,8 +354,8 @@ func TestMetricGroupData_toSummaryUnitTest(t *testing.T) { qn99 := qtL.AppendEmpty() qn99.SetQuantile(.99) qn99.SetValue(0) - point.SetTimestamp(pdata.Timestamp(14 * time.Millisecond)) // the time in milliseconds -> nanoseconds. - point.SetStartTimestamp(pdata.Timestamp(14 * time.Millisecond)) // the time in milliseconds -> nanoseconds + point.SetTimestamp(pcommon.Timestamp(14 * time.Millisecond)) // the time in milliseconds -> nanoseconds. 
+ point.SetStartTimestamp(pcommon.Timestamp(14 * time.Millisecond)) // the time in milliseconds -> nanoseconds attributes := point.Attributes() attributes.InsertString("a", "A") attributes.InsertString("b", "B") @@ -381,7 +382,7 @@ func TestMetricGroupData_toSummaryUnitTest(t *testing.T) { } require.NotNil(t, mp.groups[groupKey], "Expecting the groupKey to have a value given key:: "+groupKey) - sl := pdata.NewMetricSlice() + sl := pmetric.NewMetricSlice() mp.ToMetricPdata(&sl) require.Equal(t, 1, sl.Len(), "Exactly one metric expected") @@ -410,7 +411,7 @@ func TestMetricGroupData_toNumberDataUnitTest(t *testing.T) { labels labels.Labels scrapes []*scrape intervalStartTimestampMs int64 - want func() pdata.NumberDataPoint + want func() pmetric.NumberDataPoint }{ { metricKind: "counter", @@ -420,11 +421,11 @@ func TestMetricGroupData_toNumberDataUnitTest(t *testing.T) { scrapes: []*scrape{ {at: 13, value: 33.7, metric: "value"}, }, - want: func() pdata.NumberDataPoint { - point := pdata.NewNumberDataPoint() + want: func() pmetric.NumberDataPoint { + point := pmetric.NewNumberDataPoint() point.SetDoubleVal(33.7) - point.SetTimestamp(pdata.Timestamp(13 * time.Millisecond)) // the time in milliseconds -> nanoseconds. - point.SetStartTimestamp(pdata.Timestamp(13 * time.Millisecond)) // the time in milliseconds -> nanoseconds. + point.SetTimestamp(pcommon.Timestamp(13 * time.Millisecond)) // the time in milliseconds -> nanoseconds. + point.SetStartTimestamp(pcommon.Timestamp(13 * time.Millisecond)) // the time in milliseconds -> nanoseconds. attributes := point.Attributes() attributes.InsertString("a", "A") attributes.InsertString("b", "B") @@ -439,11 +440,11 @@ func TestMetricGroupData_toNumberDataUnitTest(t *testing.T) { scrapes: []*scrape{ {at: 28, value: 99.9, metric: "value"}, }, - want: func() pdata.NumberDataPoint { - point := pdata.NewNumberDataPoint() + want: func() pmetric.NumberDataPoint { + point := pmetric.NewNumberDataPoint() point.SetDoubleVal(99.9) - point.SetTimestamp(pdata.Timestamp(28 * time.Millisecond)) // the time in milliseconds -> nanoseconds. - point.SetStartTimestamp(pdata.Timestamp(28 * time.Millisecond)) // the time in milliseconds -> nanoseconds. + point.SetTimestamp(pcommon.Timestamp(28 * time.Millisecond)) // the time in milliseconds -> nanoseconds. + point.SetStartTimestamp(pcommon.Timestamp(28 * time.Millisecond)) // the time in milliseconds -> nanoseconds. attributes := point.Attributes() attributes.InsertString("a", "A") attributes.InsertString("b", "B") @@ -464,7 +465,7 @@ func TestMetricGroupData_toNumberDataUnitTest(t *testing.T) { groupKey := mp.getGroupKey(tt.labels.Copy()) require.NotNil(t, mp.groups[groupKey], "Expecting the groupKey to have a value given key:: "+groupKey) - sl := pdata.NewMetricSlice() + sl := pmetric.NewMetricSlice() mp.ToMetricPdata(&sl) require.Equal(t, 1, sl.Len(), "Exactly one metric expected") diff --git a/receiver/prometheusreceiver/internal/otlp_metrics_adjuster.go b/receiver/prometheusreceiver/internal/otlp_metrics_adjuster.go index 060d14f32d08..3f5bbcf9d75b 100644 --- a/receiver/prometheusreceiver/internal/otlp_metrics_adjuster.go +++ b/receiver/prometheusreceiver/internal/otlp_metrics_adjuster.go @@ -20,7 +20,8 @@ import ( "sync" "time" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" ) @@ -62,8 +63,8 @@ import ( // resets. 
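Note on the metrics-adjuster hunks that follow: they swap pdata.Map for pcommon.Map and pdata.Metric for pmetric.Metric in the timeseries bookkeeping, again without changing behaviour. A sketch of the signature construction shown further down (getTimeseriesSignaturePdata), assuming an illustrative join format — the real return formatting is not visible in the hunk:

package example

import (
	"strings"

	"go.opentelemetry.io/collector/pdata/pcommon"
)

// signature sketches the label-value signature: the metric name plus the
// sorted, non-empty attribute values, gathered via pcommon.Map.Range.
func signature(name string, attrs pcommon.Map) string {
	values := make([]string, 0, attrs.Len())
	attrs.Sort().Range(func(_ string, v pcommon.Value) bool {
		if s := v.StringVal(); s != "" {
			values = append(values, s)
		}
		return true
	})
	return name + "," + strings.Join(values, ",")
}
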
type timeseriesinfoPdata struct { mark bool - initial *pdata.Metric - previous *pdata.Metric + initial *pmetric.Metric + previous *pmetric.Metric } // timeseriesMap maps from a timeseries instance (metric * label values) to the timeseries info for @@ -78,12 +79,12 @@ type timeseriesMapPdata struct { } // Get the timeseriesinfo for the timeseries associated with the metric and label values. -func (tsm *timeseriesMapPdata) get(metric *pdata.Metric, kv pdata.Map) *timeseriesinfoPdata { +func (tsm *timeseriesMapPdata) get(metric *pmetric.Metric, kv pcommon.Map) *timeseriesinfoPdata { // This should only be invoked be functions called (directly or indirectly) by AdjustMetricSlice(). // The lock protecting tsm.tsiMap is acquired there. name := metric.Name() sig := getTimeseriesSignaturePdata(name, kv) - if metric.DataType() == pdata.MetricDataTypeHistogram { + if metric.DataType() == pmetric.MetricDataTypeHistogram { // There are 2 types of Histograms whose aggregation temporality needs distinguishing: // * CumulativeHistogram // * GaugeHistogram @@ -101,9 +102,9 @@ func (tsm *timeseriesMapPdata) get(metric *pdata.Metric, kv pdata.Map) *timeseri } // Create a unique timeseries signature consisting of the metric name and label values. -func getTimeseriesSignaturePdata(name string, kv pdata.Map) string { +func getTimeseriesSignaturePdata(name string, kv pcommon.Map) string { labelValues := make([]string, 0, kv.Len()) - kv.Sort().Range(func(_ string, attrValue pdata.Value) bool { + kv.Sort().Range(func(_ string, attrValue pcommon.Value) bool { value := attrValue.StringVal() if value != "" { labelValues = append(labelValues, value) @@ -224,7 +225,7 @@ func NewMetricsAdjusterPdata(tsm *timeseriesMapPdata, logger *zap.Logger) *Metri // AdjustMetricSlice takes a sequence of metrics and adjust their start times based on the initial and // previous points in the timeseriesMap. // Returns the total number of timeseries that had reset start times. -func (ma *MetricsAdjusterPdata) AdjustMetricSlice(metricL *pdata.MetricSlice) int { +func (ma *MetricsAdjusterPdata) AdjustMetricSlice(metricL *pmetric.MetricSlice) int { resets := 0 // The lock on the relevant timeseriesMap is held throughout the adjustment process to ensure that // nothing else can modify the data used for adjustment. @@ -240,7 +241,7 @@ func (ma *MetricsAdjusterPdata) AdjustMetricSlice(metricL *pdata.MetricSlice) in // AdjustMetrics takes a sequence of metrics and adjust their start times based on the initial and // previous points in the timeseriesMap. // Returns the total number of timeseries that had reset start times. -func (ma *MetricsAdjusterPdata) AdjustMetrics(metrics *pdata.Metrics) int { +func (ma *MetricsAdjusterPdata) AdjustMetrics(metrics *pmetric.Metrics) int { resets := 0 // The lock on the relevant timeseriesMap is held throughout the adjustment process to ensure that // nothing else can modify the data used for adjustment. @@ -260,9 +261,9 @@ func (ma *MetricsAdjusterPdata) AdjustMetrics(metrics *pdata.Metrics) int { } // Returns the number of timeseries with reset start times. 
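The signature helper above sorts the attribute map and collects the non-empty values to identify one timeseries. A rough standalone re-creation of that idea follows; the exact join format used by `getTimeseriesSignaturePdata` is not visible in this hunk, so the `fmt.Sprintf` layout and the `pcommon.NewMap` constructor are assumptions about the pinned collector version:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pcommon"
)

// timeseriesSignature mimics the shape of getTimeseriesSignaturePdata: the
// metric name plus the sorted, non-empty attribute values identify a series.
func timeseriesSignature(name string, kv pcommon.Map) string {
	values := make([]string, 0, kv.Len())
	kv.Sort().Range(func(_ string, v pcommon.Value) bool {
		if s := v.StringVal(); s != "" {
			values = append(values, s)
		}
		return true
	})
	return fmt.Sprintf("%s,%v", name, values)
}

func main() {
	attrs := pcommon.NewMap()
	attrs.InsertString("b", "B")
	attrs.InsertString("a", "A")
	fmt.Println(timeseriesSignature("http_requests_total", attrs)) // http_requests_total,[A B]
}
```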
-func (ma *MetricsAdjusterPdata) adjustMetric(metric *pdata.Metric) int { +func (ma *MetricsAdjusterPdata) adjustMetric(metric *pmetric.Metric) int { switch metric.DataType() { - case pdata.MetricDataTypeGauge: + case pmetric.MetricDataTypeGauge: // gauges don't need to be adjusted so no additional processing is necessary return 0 default: @@ -271,18 +272,18 @@ func (ma *MetricsAdjusterPdata) adjustMetric(metric *pdata.Metric) int { } // Returns the number of timeseries that had reset start times. -func (ma *MetricsAdjusterPdata) adjustMetricPoints(metric *pdata.Metric) int { +func (ma *MetricsAdjusterPdata) adjustMetricPoints(metric *pmetric.Metric) int { switch dataType := metric.DataType(); dataType { - case pdata.MetricDataTypeGauge: + case pmetric.MetricDataTypeGauge: return ma.adjustMetricGauge(metric) - case pdata.MetricDataTypeHistogram: + case pmetric.MetricDataTypeHistogram: return ma.adjustMetricHistogram(metric) - case pdata.MetricDataTypeSummary: + case pmetric.MetricDataTypeSummary: return ma.adjustMetricSummary(metric) - case pdata.MetricDataTypeSum: + case pmetric.MetricDataTypeSum: return ma.adjustMetricSum(metric) default: @@ -294,7 +295,7 @@ func (ma *MetricsAdjusterPdata) adjustMetricPoints(metric *pdata.Metric) int { // Returns true if 'current' was adjusted and false if 'current' is an the initial occurrence or a // reset of the timeseries. -func (ma *MetricsAdjusterPdata) adjustMetricGauge(current *pdata.Metric) (resets int) { +func (ma *MetricsAdjusterPdata) adjustMetricGauge(current *pmetric.Metric) (resets int) { currentPoints := current.Gauge().DataPoints() for i := 0; i < currentPoints.Len(); i++ { @@ -342,9 +343,9 @@ func (ma *MetricsAdjusterPdata) adjustMetricGauge(current *pdata.Metric) (resets return } -func (ma *MetricsAdjusterPdata) adjustMetricHistogram(current *pdata.Metric) (resets int) { +func (ma *MetricsAdjusterPdata) adjustMetricHistogram(current *pmetric.Metric) (resets int) { histogram := current.Histogram() - if histogram.AggregationTemporality() != pdata.MetricAggregationTemporalityCumulative { + if histogram.AggregationTemporality() != pmetric.MetricAggregationTemporalityCumulative { // Only dealing with CumulativeDistributions. 
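The adjuster dispatch above is a plain switch on the new `pmetric.MetricDataType` enum. A compact illustration of the same pattern; the helper function is hypothetical, only the enum values and accessors come from the hunks:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

// dataPointCount uses the same type switch the adjuster uses: gauges, sums,
// histograms and summaries each expose their own data-point slice accessor.
func dataPointCount(m pmetric.Metric) int {
	switch m.DataType() {
	case pmetric.MetricDataTypeGauge:
		return m.Gauge().DataPoints().Len()
	case pmetric.MetricDataTypeSum:
		return m.Sum().DataPoints().Len()
	case pmetric.MetricDataTypeHistogram:
		return m.Histogram().DataPoints().Len()
	case pmetric.MetricDataTypeSummary:
		return m.Summary().DataPoints().Len()
	default:
		return 0
	}
}

func main() {
	m := pmetric.NewMetric()
	m.SetDataType(pmetric.MetricDataTypeSum)
	m.Sum().DataPoints().AppendEmpty()
	fmt.Println(dataPointCount(m)) // 1
}
```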
return 0 } @@ -367,7 +368,7 @@ func (ma *MetricsAdjusterPdata) adjustMetricHistogram(current *pdata.Metric) (re previous = tsi.initial } - if !currentDist.Flags().HasFlag(pdata.MetricDataPointFlagNoRecordedValue) { + if !currentDist.Flags().HasFlag(pmetric.MetricDataPointFlagNoRecordedValue) { tsi.previous = current } @@ -390,7 +391,7 @@ func (ma *MetricsAdjusterPdata) adjustMetricHistogram(current *pdata.Metric) (re resets++ continue } - if currentDist.Flags().HasFlag(pdata.MetricDataPointFlagNoRecordedValue) { + if currentDist.Flags().HasFlag(pmetric.MetricDataPointFlagNoRecordedValue) { currentDist.SetStartTimestamp(initialPoints.At(i).StartTimestamp()) continue } @@ -407,7 +408,7 @@ func (ma *MetricsAdjusterPdata) adjustMetricHistogram(current *pdata.Metric) (re return } -func (ma *MetricsAdjusterPdata) adjustMetricSum(current *pdata.Metric) (resets int) { +func (ma *MetricsAdjusterPdata) adjustMetricSum(current *pmetric.Metric) (resets int) { currentPoints := current.Sum().DataPoints() for i := 0; i < currentPoints.Len(); i++ { @@ -421,7 +422,7 @@ func (ma *MetricsAdjusterPdata) adjustMetricSum(current *pdata.Metric) (resets i previous = tsi.initial } - if !currentSum.Flags().HasFlag(pdata.MetricDataPointFlagNoRecordedValue) { + if !currentSum.Flags().HasFlag(pmetric.MetricDataPointFlagNoRecordedValue) { tsi.previous = current } @@ -443,7 +444,7 @@ func (ma *MetricsAdjusterPdata) adjustMetricSum(current *pdata.Metric) (resets i resets++ continue } - if currentSum.Flags().HasFlag(pdata.MetricDataPointFlagNoRecordedValue) { + if currentSum.Flags().HasFlag(pmetric.MetricDataPointFlagNoRecordedValue) { currentSum.SetStartTimestamp(initialPoints.At(i).StartTimestamp()) continue } @@ -461,7 +462,7 @@ func (ma *MetricsAdjusterPdata) adjustMetricSum(current *pdata.Metric) (resets i return } -func (ma *MetricsAdjusterPdata) adjustMetricSummary(current *pdata.Metric) (resets int) { +func (ma *MetricsAdjusterPdata) adjustMetricSummary(current *pmetric.Metric) (resets int) { currentPoints := current.Summary().DataPoints() for i := 0; i < currentPoints.Len(); i++ { @@ -475,7 +476,7 @@ func (ma *MetricsAdjusterPdata) adjustMetricSummary(current *pdata.Metric) (rese previous = tsi.initial } - if !currentSummary.Flags().HasFlag(pdata.MetricDataPointFlagNoRecordedValue) { + if !currentSummary.Flags().HasFlag(pmetric.MetricDataPointFlagNoRecordedValue) { tsi.previous = current } @@ -497,7 +498,7 @@ func (ma *MetricsAdjusterPdata) adjustMetricSummary(current *pdata.Metric) (rese resets++ continue } - if currentSummary.Flags().HasFlag(pdata.MetricDataPointFlagNoRecordedValue) { + if currentSummary.Flags().HasFlag(pmetric.MetricDataPointFlagNoRecordedValue) { currentSummary.SetStartTimestamp(initialPoints.At(i).StartTimestamp()) continue } diff --git a/receiver/prometheusreceiver/internal/otlp_metrics_adjuster_test.go b/receiver/prometheusreceiver/internal/otlp_metrics_adjuster_test.go index e6365658de8b..4fd78fcdfce5 100644 --- a/receiver/prometheusreceiver/internal/otlp_metrics_adjuster_test.go +++ b/receiver/prometheusreceiver/internal/otlp_metrics_adjuster_test.go @@ -20,16 +20,17 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" ) var ( - pdt1Ms = pdata.Timestamp(time.Unix(0, 1000000).UnixNano()) - pdt2Ms = pdata.Timestamp(time.Unix(0, 2000000).UnixNano()) - pdt3Ms = 
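Several of the hunks above gate the "remember this point as previous" step on the staleness marker; the check itself is just a flag test on the data point. A small sketch, with the point construction being illustrative:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	dp := pmetric.NewNumberDataPoint()
	// Prometheus staleness markers arrive as points carrying the
	// "no recorded value" flag; the adjuster skips using such points as the
	// previous sample when detecting resets.
	dp.SetFlags(1)
	stale := dp.Flags().HasFlag(pmetric.MetricDataPointFlagNoRecordedValue)
	fmt.Println(stale) // true
}
```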
pdata.Timestamp(time.Unix(0, 3000000).UnixNano()) - pdt4Ms = pdata.Timestamp(time.Unix(0, 5000000).UnixNano()) - pdt5Ms = pdata.Timestamp(time.Unix(0, 5000000).UnixNano()) + pdt1Ms = pcommon.Timestamp(time.Unix(0, 1000000).UnixNano()) + pdt2Ms = pcommon.Timestamp(time.Unix(0, 2000000).UnixNano()) + pdt3Ms = pcommon.Timestamp(time.Unix(0, 3000000).UnixNano()) + pdt4Ms = pcommon.Timestamp(time.Unix(0, 5000000).UnixNano()) + pdt5Ms = pcommon.Timestamp(time.Unix(0, 5000000).UnixNano()) bounds0 = []float64{1, 2, 4} percent0 = []float64{10, 50, 90} @@ -43,10 +44,10 @@ func Test_gauge_pdata(t *testing.T) { script := []*metricsAdjusterTestPdata{ { "Gauge: round 1 - gauge not adjusted", - func() *pdata.MetricSlice { - mL := pdata.NewMetricSlice() + func() *pmetric.MetricSlice { + mL := pmetric.NewMetricSlice() m0 := mL.AppendEmpty() - m0.SetDataType(pdata.MetricDataTypeGauge) + m0.SetDataType(pmetric.MetricDataTypeGauge) m0.SetName("gauge1") g0 := m0.Gauge() pt0 := g0.DataPoints().AppendEmpty() @@ -56,10 +57,10 @@ func Test_gauge_pdata(t *testing.T) { pt0.SetDoubleVal(44) return &mL }(), - func() *pdata.MetricSlice { - mL := pdata.NewMetricSlice() + func() *pmetric.MetricSlice { + mL := pmetric.NewMetricSlice() m0 := mL.AppendEmpty() - m0.SetDataType(pdata.MetricDataTypeGauge) + m0.SetDataType(pmetric.MetricDataTypeGauge) m0.SetName("gauge1") g0 := m0.Gauge() pt0 := g0.DataPoints().AppendEmpty() @@ -73,10 +74,10 @@ func Test_gauge_pdata(t *testing.T) { }, { "Gauge: round 2 - gauge not adjusted", - func() *pdata.MetricSlice { - mL := pdata.NewMetricSlice() + func() *pmetric.MetricSlice { + mL := pmetric.NewMetricSlice() m0 := mL.AppendEmpty() - m0.SetDataType(pdata.MetricDataTypeGauge) + m0.SetDataType(pmetric.MetricDataTypeGauge) m0.SetName("gauge1") g0 := m0.Gauge() pt0 := g0.DataPoints().AppendEmpty() @@ -88,10 +89,10 @@ func Test_gauge_pdata(t *testing.T) { return &mL }(), - func() *pdata.MetricSlice { - mL := pdata.NewMetricSlice() + func() *pmetric.MetricSlice { + mL := pmetric.NewMetricSlice() m0 := mL.AppendEmpty() - m0.SetDataType(pdata.MetricDataTypeGauge) + m0.SetDataType(pmetric.MetricDataTypeGauge) m0.SetName("gauge1") g0 := m0.Gauge() pt0 := g0.DataPoints().AppendEmpty() @@ -106,10 +107,10 @@ func Test_gauge_pdata(t *testing.T) { }, { "Gauge: round 3 - value less than previous value - gauge is not adjusted", - func() *pdata.MetricSlice { - mL := pdata.NewMetricSlice() + func() *pmetric.MetricSlice { + mL := pmetric.NewMetricSlice() m0 := mL.AppendEmpty() - m0.SetDataType(pdata.MetricDataTypeGauge) + m0.SetDataType(pmetric.MetricDataTypeGauge) m0.SetName("gauge1") g0 := m0.Gauge() pt0 := g0.DataPoints().AppendEmpty() @@ -121,10 +122,10 @@ func Test_gauge_pdata(t *testing.T) { return &mL }(), - func() *pdata.MetricSlice { - mL := pdata.NewMetricSlice() + func() *pmetric.MetricSlice { + mL := pmetric.NewMetricSlice() m0 := mL.AppendEmpty() - m0.SetDataType(pdata.MetricDataTypeGauge) + m0.SetDataType(pmetric.MetricDataTypeGauge) m0.SetName("gauge1") g0 := m0.Gauge() pt0 := g0.DataPoints().AppendEmpty() @@ -146,10 +147,10 @@ func Test_cumulative_pdata(t *testing.T) { script := []*metricsAdjusterTestPdata{ { "Cumulative: round 1 - initial instance, start time is established", - func() *pdata.MetricSlice { - mL := pdata.NewMetricSlice() + func() *pmetric.MetricSlice { + mL := pmetric.NewMetricSlice() m0 := mL.AppendEmpty() - m0.SetDataType(pdata.MetricDataTypeSum) + m0.SetDataType(pmetric.MetricDataTypeSum) m0.SetName("cumulative1") g0 := m0.Sum() pt0 := g0.DataPoints().AppendEmpty() @@ 
-161,10 +162,10 @@ func Test_cumulative_pdata(t *testing.T) { return &mL }(), - func() *pdata.MetricSlice { - mL := pdata.NewMetricSlice() + func() *pmetric.MetricSlice { + mL := pmetric.NewMetricSlice() m0 := mL.AppendEmpty() - m0.SetDataType(pdata.MetricDataTypeSum) + m0.SetDataType(pmetric.MetricDataTypeSum) m0.SetName("cumulative1") g0 := m0.Sum() pt0 := g0.DataPoints().AppendEmpty() @@ -180,10 +181,10 @@ func Test_cumulative_pdata(t *testing.T) { }, { "Cumulative: round 2 - instance adjusted based on round 1", - func() *pdata.MetricSlice { - mL := pdata.NewMetricSlice() + func() *pmetric.MetricSlice { + mL := pmetric.NewMetricSlice() m0 := mL.AppendEmpty() - m0.SetDataType(pdata.MetricDataTypeSum) + m0.SetDataType(pmetric.MetricDataTypeSum) m0.SetName("cumulative1") g0 := m0.Sum() pt0 := g0.DataPoints().AppendEmpty() @@ -195,10 +196,10 @@ func Test_cumulative_pdata(t *testing.T) { return &mL }(), - func() *pdata.MetricSlice { - mL := pdata.NewMetricSlice() + func() *pmetric.MetricSlice { + mL := pmetric.NewMetricSlice() m0 := mL.AppendEmpty() - m0.SetDataType(pdata.MetricDataTypeSum) + m0.SetDataType(pmetric.MetricDataTypeSum) m0.SetName("cumulative1") g0 := m0.Sum() pt0 := g0.DataPoints().AppendEmpty() @@ -214,10 +215,10 @@ func Test_cumulative_pdata(t *testing.T) { }, { "Cumulative: round 3 - instance reset (value less than previous value), start time is reset", - func() *pdata.MetricSlice { - mL := pdata.NewMetricSlice() + func() *pmetric.MetricSlice { + mL := pmetric.NewMetricSlice() m0 := mL.AppendEmpty() - m0.SetDataType(pdata.MetricDataTypeSum) + m0.SetDataType(pmetric.MetricDataTypeSum) m0.SetName("cumulative1") g0 := m0.Sum() pt0 := g0.DataPoints().AppendEmpty() @@ -229,10 +230,10 @@ func Test_cumulative_pdata(t *testing.T) { return &mL }(), - func() *pdata.MetricSlice { - mL := pdata.NewMetricSlice() + func() *pmetric.MetricSlice { + mL := pmetric.NewMetricSlice() m0 := mL.AppendEmpty() - m0.SetDataType(pdata.MetricDataTypeSum) + m0.SetDataType(pmetric.MetricDataTypeSum) m0.SetName("cumulative1") g0 := m0.Sum() pt0 := g0.DataPoints().AppendEmpty() @@ -248,10 +249,10 @@ func Test_cumulative_pdata(t *testing.T) { }, { "Cumulative: round 4 - instance adjusted based on round 3", - func() *pdata.MetricSlice { - mL := pdata.NewMetricSlice() + func() *pmetric.MetricSlice { + mL := pmetric.NewMetricSlice() m0 := mL.AppendEmpty() - m0.SetDataType(pdata.MetricDataTypeSum) + m0.SetDataType(pmetric.MetricDataTypeSum) m0.SetName("cumulative1") g0 := m0.Sum() pt0 := g0.DataPoints().AppendEmpty() @@ -263,10 +264,10 @@ func Test_cumulative_pdata(t *testing.T) { return &mL }(), - func() *pdata.MetricSlice { - mL := pdata.NewMetricSlice() + func() *pmetric.MetricSlice { + mL := pmetric.NewMetricSlice() m0 := mL.AppendEmpty() - m0.SetDataType(pdata.MetricDataTypeSum) + m0.SetDataType(pmetric.MetricDataTypeSum) m0.SetName("cumulative1") g0 := m0.Sum() pt0 := g0.DataPoints().AppendEmpty() @@ -282,10 +283,10 @@ func Test_cumulative_pdata(t *testing.T) { }, { "Cumulative: round 5 - instance adjusted based on round 4", - func() *pdata.MetricSlice { - mL := pdata.NewMetricSlice() + func() *pmetric.MetricSlice { + mL := pmetric.NewMetricSlice() m0 := mL.AppendEmpty() - m0.SetDataType(pdata.MetricDataTypeSum) + m0.SetDataType(pmetric.MetricDataTypeSum) m0.SetName("cumulative1") g0 := m0.Sum() pt0 := g0.DataPoints().AppendEmpty() @@ -297,10 +298,10 @@ func Test_cumulative_pdata(t *testing.T) { return &mL }(), - func() *pdata.MetricSlice { - mL := pdata.NewMetricSlice() + func() *pmetric.MetricSlice { + 
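As the round descriptions above spell out, only cumulative series get their start timestamps rewritten: round 1 establishes the start time, later rounds inherit it, and a value drop is treated as a reset. A minimal construction of such a series with the new API (names and values are illustrative):

```go
package main

import (
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	mL := pmetric.NewMetricSlice()
	m := mL.AppendEmpty()
	m.SetName("cumulative1")
	m.SetDataType(pmetric.MetricDataTypeSum)
	sum := m.Sum()
	sum.SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
	sum.SetIsMonotonic(true)

	dp := sum.DataPoints().AppendEmpty()
	dp.SetDoubleVal(44)
	// On the first observation the adjuster pins StartTimestamp to this
	// point's own timestamp; a later, smaller value resets it again.
	dp.SetTimestamp(pcommon.NewTimestampFromTime(time.Unix(0, 1_000_000)))
	dp.SetStartTimestamp(pcommon.NewTimestampFromTime(time.Unix(0, 1_000_000)))
}
```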
mL := pmetric.NewMetricSlice() m0 := mL.AppendEmpty() - m0.SetDataType(pdata.MetricDataTypeSum) + m0.SetDataType(pmetric.MetricDataTypeSum) m0.SetName("cumulative1") g0 := m0.Sum() pt0 := g0.DataPoints().AppendEmpty() @@ -318,7 +319,7 @@ func Test_cumulative_pdata(t *testing.T) { runScriptPdata(t, NewJobsMapPdata(time.Minute).get("job", "0"), script) } -func populateSummary(sdp *pdata.SummaryDataPoint, timestamp pdata.Timestamp, count uint64, sum float64, quantilePercents, quantileValues []float64) { +func populateSummary(sdp *pmetric.SummaryDataPoint, timestamp pcommon.Timestamp, count uint64, sum float64, quantilePercents, quantileValues []float64) { quantiles := sdp.QuantileValues() for i := range quantilePercents { qv := quantiles.AppendEmpty() @@ -334,10 +335,10 @@ func Test_summary_no_count_pdata(t *testing.T) { script := []*metricsAdjusterTestPdata{ { "Summary No Count: round 1 - initial instance, start time is established", - func() *pdata.MetricSlice { - mL := pdata.NewMetricSlice() + func() *pmetric.MetricSlice { + mL := pmetric.NewMetricSlice() m0 := mL.AppendEmpty() - m0.SetDataType(pdata.MetricDataTypeSummary) + m0.SetDataType(pmetric.MetricDataTypeSummary) m0.SetName("summary1") s0 := m0.Summary() pt0 := s0.DataPoints().AppendEmpty() @@ -345,10 +346,10 @@ func Test_summary_no_count_pdata(t *testing.T) { populateSummary(&pt0, pdt1Ms, 10, 40, percent0, []float64{1, 5, 8}) return &mL }(), - func() *pdata.MetricSlice { - mL := pdata.NewMetricSlice() + func() *pmetric.MetricSlice { + mL := pmetric.NewMetricSlice() m0 := mL.AppendEmpty() - m0.SetDataType(pdata.MetricDataTypeSummary) + m0.SetDataType(pmetric.MetricDataTypeSummary) m0.SetName("summary1") s0 := m0.Summary() pt0 := s0.DataPoints().AppendEmpty() @@ -360,10 +361,10 @@ func Test_summary_no_count_pdata(t *testing.T) { }, { "Summary No Count: round 2 - instance adjusted based on round 1", - func() *pdata.MetricSlice { - mL := pdata.NewMetricSlice() + func() *pmetric.MetricSlice { + mL := pmetric.NewMetricSlice() m0 := mL.AppendEmpty() - m0.SetDataType(pdata.MetricDataTypeSummary) + m0.SetDataType(pmetric.MetricDataTypeSummary) m0.SetName("summary1") s0 := m0.Summary() pt0 := s0.DataPoints().AppendEmpty() @@ -371,10 +372,10 @@ func Test_summary_no_count_pdata(t *testing.T) { populateSummary(&pt0, pdt2Ms, 15, 70, percent0, []float64{7, 44, 9}) return &mL }(), - func() *pdata.MetricSlice { - mL := pdata.NewMetricSlice() + func() *pmetric.MetricSlice { + mL := pmetric.NewMetricSlice() m0 := mL.AppendEmpty() - m0.SetDataType(pdata.MetricDataTypeSummary) + m0.SetDataType(pmetric.MetricDataTypeSummary) m0.SetName("summary1") s0 := m0.Summary() pt0 := s0.DataPoints().AppendEmpty() @@ -386,10 +387,10 @@ func Test_summary_no_count_pdata(t *testing.T) { }, { "Summary No Count: round 3 - instance reset (count less than previous), start time is reset", - func() *pdata.MetricSlice { - mL := pdata.NewMetricSlice() + func() *pmetric.MetricSlice { + mL := pmetric.NewMetricSlice() m0 := mL.AppendEmpty() - m0.SetDataType(pdata.MetricDataTypeSummary) + m0.SetDataType(pmetric.MetricDataTypeSummary) m0.SetName("summary1") s0 := m0.Summary() pt0 := s0.DataPoints().AppendEmpty() @@ -397,10 +398,10 @@ func Test_summary_no_count_pdata(t *testing.T) { populateSummary(&pt0, pdt3Ms, 12, 66, percent0, []float64{3, 22, 5}) return &mL }(), - func() *pdata.MetricSlice { - mL := pdata.NewMetricSlice() + func() *pmetric.MetricSlice { + mL := pmetric.NewMetricSlice() m0 := mL.AppendEmpty() - m0.SetDataType(pdata.MetricDataTypeSummary) + 
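`populateSummary` simply appends quantile/value pairs onto the data point. For reference, the same construction written out directly against the new `pmetric` types; the numbers are made up:

```go
package main

import (
	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	m := pmetric.NewMetric()
	m.SetName("summary1")
	m.SetDataType(pmetric.MetricDataTypeSummary)

	dp := m.Summary().DataPoints().AppendEmpty()
	dp.SetCount(10)
	dp.SetSum(40)

	// Each entry mirrors one <quantile, value> pair, e.g. the 0.5 quantile.
	quantiles := []float64{0.1, 0.5, 0.9}
	values := []float64{1, 5, 8}
	for i := range quantiles {
		q := dp.QuantileValues().AppendEmpty()
		q.SetQuantile(quantiles[i])
		q.SetValue(values[i])
	}
}
```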
m0.SetDataType(pmetric.MetricDataTypeSummary) m0.SetName("summary1") s0 := m0.Summary() pt0 := s0.DataPoints().AppendEmpty() @@ -412,10 +413,10 @@ func Test_summary_no_count_pdata(t *testing.T) { }, { "Summary No Count: round 4 - instance adjusted based on round 3", - func() *pdata.MetricSlice { - mL := pdata.NewMetricSlice() + func() *pmetric.MetricSlice { + mL := pmetric.NewMetricSlice() m0 := mL.AppendEmpty() - m0.SetDataType(pdata.MetricDataTypeSummary) + m0.SetDataType(pmetric.MetricDataTypeSummary) m0.SetName("summary1") s0 := m0.Summary() pt0 := s0.DataPoints().AppendEmpty() @@ -424,10 +425,10 @@ func Test_summary_no_count_pdata(t *testing.T) { pt0.SetStartTimestamp(pdt4Ms) return &mL }(), - func() *pdata.MetricSlice { - mL := pdata.NewMetricSlice() + func() *pmetric.MetricSlice { + mL := pmetric.NewMetricSlice() m0 := mL.AppendEmpty() - m0.SetDataType(pdata.MetricDataTypeSummary) + m0.SetDataType(pmetric.MetricDataTypeSummary) m0.SetName("summary1") s0 := m0.Summary() pt0 := s0.DataPoints().AppendEmpty() @@ -446,10 +447,10 @@ func Test_summary_flag_norecordedvalue(t *testing.T) { script := []*metricsAdjusterTestPdata{ { "Summary No Count: round 1 - initial instance, start time is established", - func() *pdata.MetricSlice { - mL := pdata.NewMetricSlice() + func() *pmetric.MetricSlice { + mL := pmetric.NewMetricSlice() m0 := mL.AppendEmpty() - m0.SetDataType(pdata.MetricDataTypeSummary) + m0.SetDataType(pmetric.MetricDataTypeSummary) m0.SetName("summary1") s0 := m0.Summary() pt0 := s0.DataPoints().AppendEmpty() @@ -459,10 +460,10 @@ func Test_summary_flag_norecordedvalue(t *testing.T) { populateSummary(&pt0, pdt1Ms, 10, 40, percent0, []float64{1, 5, 8}) return &mL }(), - func() *pdata.MetricSlice { - mL := pdata.NewMetricSlice() + func() *pmetric.MetricSlice { + mL := pmetric.NewMetricSlice() m0 := mL.AppendEmpty() - m0.SetDataType(pdata.MetricDataTypeSummary) + m0.SetDataType(pmetric.MetricDataTypeSummary) m0.SetName("summary1") s0 := m0.Summary() pt0 := s0.DataPoints().AppendEmpty() @@ -476,10 +477,10 @@ func Test_summary_flag_norecordedvalue(t *testing.T) { }, { "Summary Flag NoRecordedValue: round 2 - instance adjusted based on round 1", - func() *pdata.MetricSlice { - mL := pdata.NewMetricSlice() + func() *pmetric.MetricSlice { + mL := pmetric.NewMetricSlice() m0 := mL.AppendEmpty() - m0.SetDataType(pdata.MetricDataTypeSummary) + m0.SetDataType(pmetric.MetricDataTypeSummary) m0.SetName("summary1") s0 := m0.Summary() pt0 := s0.DataPoints().AppendEmpty() @@ -489,10 +490,10 @@ func Test_summary_flag_norecordedvalue(t *testing.T) { pt0.SetFlags(1) return &mL }(), - func() *pdata.MetricSlice { - mL := pdata.NewMetricSlice() + func() *pmetric.MetricSlice { + mL := pmetric.NewMetricSlice() m0 := mL.AppendEmpty() - m0.SetDataType(pdata.MetricDataTypeSummary) + m0.SetDataType(pmetric.MetricDataTypeSummary) m0.SetName("summary1") s0 := m0.Summary() pt0 := s0.DataPoints().AppendEmpty() @@ -566,8 +567,8 @@ var ( sumMetric = sumMetricPdata ) -func metricSlice(metrics ...*pdata.Metric) *pdata.MetricSlice { - ms := pdata.NewMetricSlice() +func metricSlice(metrics ...*pmetric.Metric) *pmetric.MetricSlice { + ms := pmetric.NewMetricSlice() for _, metric := range metrics { destMetric := ms.AppendEmpty() metric.CopyTo(destMetric) @@ -629,24 +630,24 @@ func Test_histogram_flag_norecordedvalue(t *testing.T) { }, { "Histogram: round 2 - instance adjusted based on round 1", - func() *pdata.MetricSlice { - metric := pdata.NewMetric() + func() *pmetric.MetricSlice { + metric := pmetric.NewMetric() 
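The `metricSlice` helper above relies on `CopyTo` into an appended empty element, which is the usual way to move values between pdata slices. A standalone version of the same idea, with the helper name changed to keep the snippet self-contained:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pmetric"
)

// toSlice copies each metric into a freshly appended slot of a new slice.
func toSlice(metrics ...pmetric.Metric) pmetric.MetricSlice {
	ms := pmetric.NewMetricSlice()
	for _, m := range metrics {
		m.CopyTo(ms.AppendEmpty())
	}
	return ms
}

func main() {
	m := pmetric.NewMetric()
	m.SetName("example")
	fmt.Println(toSlice(m).Len()) // 1
}
```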
metric.SetName(cd1) - metric.SetDataType(pdata.MetricDataTypeHistogram) + metric.SetDataType(pmetric.MetricDataTypeHistogram) histogram := metric.Histogram() - histogram.SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + histogram.SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) destPointL := histogram.DataPoints() dp := destPointL.AppendEmpty() dp.SetTimestamp(pdt2Ms) dp.SetFlags(1) return metricSlice(histogramMetric(cd1, k1v1k2v2, pdt2Ms, &dp)) }(), - func() *pdata.MetricSlice { - metric := pdata.NewMetric() + func() *pmetric.MetricSlice { + metric := pmetric.NewMetric() metric.SetName(cd1) - metric.SetDataType(pdata.MetricDataTypeHistogram) + metric.SetDataType(pmetric.MetricDataTypeHistogram) histogram := metric.Histogram() - histogram.SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + histogram.SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) destPointL := histogram.DataPoints() dp := destPointL.AppendEmpty() dp.SetTimestamp(pdt2Ms) @@ -661,24 +662,24 @@ func Test_histogram_flag_norecordedvalue(t *testing.T) { } func Test_histogram_flag_norecordedvalue_first_observation(t *testing.T) { - m1 := func() *pdata.MetricSlice { - metric := pdata.NewMetric() + m1 := func() *pmetric.MetricSlice { + metric := pmetric.NewMetric() metric.SetName(cd1) - metric.SetDataType(pdata.MetricDataTypeHistogram) + metric.SetDataType(pmetric.MetricDataTypeHistogram) histogram := metric.Histogram() - histogram.SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + histogram.SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) destPointL := histogram.DataPoints() dp := destPointL.AppendEmpty() dp.SetTimestamp(pdt1Ms) dp.SetFlags(1) return metricSlice(histogramMetric(cd1, k1v1k2v2, pdt1Ms, &dp)) }() - m2 := func() *pdata.MetricSlice { - metric := pdata.NewMetric() + m2 := func() *pmetric.MetricSlice { + metric := pmetric.NewMetric() metric.SetName(cd1) - metric.SetDataType(pdata.MetricDataTypeHistogram) + metric.SetDataType(pmetric.MetricDataTypeHistogram) histogram := metric.Histogram() - histogram.SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + histogram.SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) destPointL := histogram.DataPoints() dp := destPointL.AppendEmpty() dp.SetTimestamp(pdt2Ms) @@ -704,10 +705,10 @@ func Test_histogram_flag_norecordedvalue_first_observation(t *testing.T) { } func Test_summary_flag_norecordedvalue_first_observation(t *testing.T) { - m1 := func() *pdata.MetricSlice { - metric := pdata.NewMetric() + m1 := func() *pmetric.MetricSlice { + metric := pmetric.NewMetric() metric.SetName(cd1) - metric.SetDataType(pdata.MetricDataTypeSummary) + metric.SetDataType(pmetric.MetricDataTypeSummary) summary := metric.Summary() destPointL := summary.DataPoints() dp := destPointL.AppendEmpty() @@ -715,10 +716,10 @@ func Test_summary_flag_norecordedvalue_first_observation(t *testing.T) { dp.SetFlags(1) return metricSlice(summaryMetric(cd1, k1v1k2v2, pdt1Ms, &dp)) }() - m2 := func() *pdata.MetricSlice { - metric := pdata.NewMetric() + m2 := func() *pmetric.MetricSlice { + metric := pmetric.NewMetric() metric.SetName(cd1) - metric.SetDataType(pdata.MetricDataTypeSummary) + metric.SetDataType(pmetric.MetricDataTypeSummary) summary := metric.Summary() destPointL := summary.DataPoints() dp := destPointL.AppendEmpty() @@ -745,10 +746,10 @@ func Test_summary_flag_norecordedvalue_first_observation(t *testing.T) { } func 
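The histogram fixtures above all follow the same shape: set the data type, force cumulative temporality, then append a data point, here flagged as a staleness marker. A condensed sketch with illustrative values:

```go
package main

import (
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	m := pmetric.NewMetric()
	m.SetName("hist_test")
	m.SetDataType(pmetric.MetricDataTypeHistogram)

	h := m.Histogram()
	// The adjuster only touches cumulative histograms, so the tests pin this.
	h.SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)

	dp := h.DataPoints().AppendEmpty()
	dp.SetTimestamp(pcommon.NewTimestampFromTime(time.Unix(0, 2_000_000)))
	dp.SetFlags(1) // staleness marker: no recorded value
}
```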
Test_gauge_flag_norecordedvalue_first_observation(t *testing.T) { - m1 := func() *pdata.MetricSlice { - metric := pdata.NewMetric() + m1 := func() *pmetric.MetricSlice { + metric := pmetric.NewMetric() metric.SetName(cd1) - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) gauge := metric.Gauge() destPointL := gauge.DataPoints() dp := destPointL.AppendEmpty() @@ -756,10 +757,10 @@ func Test_gauge_flag_norecordedvalue_first_observation(t *testing.T) { dp.SetFlags(1) return metricSlice(gaugeMetric(cd1, k1v1k2v2, pdt1Ms, &dp)) }() - m2 := func() *pdata.MetricSlice { - metric := pdata.NewMetric() + m2 := func() *pmetric.MetricSlice { + metric := pmetric.NewMetric() metric.SetName(cd1) - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) gauge := metric.Gauge() destPointL := gauge.DataPoints() dp := destPointL.AppendEmpty() @@ -786,24 +787,24 @@ func Test_gauge_flag_norecordedvalue_first_observation(t *testing.T) { } func Test_sum_flag_norecordedvalue_first_observation(t *testing.T) { - m1 := func() *pdata.MetricSlice { - metric := pdata.NewMetric() + m1 := func() *pmetric.MetricSlice { + metric := pmetric.NewMetric() metric.SetName(cd1) - metric.SetDataType(pdata.MetricDataTypeSum) + metric.SetDataType(pmetric.MetricDataTypeSum) sum := metric.Sum() - sum.SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + sum.SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) destPointL := sum.DataPoints() dp := destPointL.AppendEmpty() dp.SetTimestamp(pdt1Ms) dp.SetFlags(1) return metricSlice(sumMetric(cd1, k1v1k2v2, pdt1Ms, &dp)) }() - m2 := func() *pdata.MetricSlice { - metric := pdata.NewMetric() + m2 := func() *pmetric.MetricSlice { + metric := pmetric.NewMetric() metric.SetName(cd1) - metric.SetDataType(pdata.MetricDataTypeSum) + metric.SetDataType(pmetric.MetricDataTypeSum) sum := metric.Sum() - sum.SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + sum.SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) destPointL := sum.DataPoints() dp := destPointL.AppendEmpty() dp.SetTimestamp(pdt2Ms) @@ -1075,7 +1076,7 @@ func Test_jobGC_pdata(t *testing.T) { }, } - emptyMetricSlice := func() *pdata.MetricSlice { ms := pdata.NewMetricSlice(); return &ms } + emptyMetricSlice := func() *pmetric.MetricSlice { ms := pmetric.NewMetricSlice(); return &ms } job2Script1 := []*metricsAdjusterTestPdata{ { "JobGC: job2, round 1 - no metrics adjusted, just trigger gc", @@ -1125,8 +1126,8 @@ func Test_jobGC_pdata(t *testing.T) { type metricsAdjusterTestPdata struct { description string - metrics *pdata.MetricSlice - adjusted *pdata.MetricSlice + metrics *pmetric.MetricSlice + adjusted *pmetric.MetricSlice resets int } diff --git a/receiver/prometheusreceiver/internal/otlp_metricsbuilder.go b/receiver/prometheusreceiver/internal/otlp_metricsbuilder.go index 3b855ad20e66..346173f34611 100644 --- a/receiver/prometheusreceiver/internal/otlp_metricsbuilder.go +++ b/receiver/prometheusreceiver/internal/otlp_metricsbuilder.go @@ -23,28 +23,28 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/textparse" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" ) -func isUsefulLabelPdata(mType pdata.MetricDataType, labelKey string) bool { +func isUsefulLabelPdata(mType pmetric.MetricDataType, 
labelKey string) bool { switch labelKey { case model.MetricNameLabel, model.InstanceLabel, model.SchemeLabel, model.MetricsPathLabel, model.JobLabel: return false case model.BucketLabel: - return mType != pdata.MetricDataTypeHistogram + return mType != pmetric.MetricDataTypeHistogram case model.QuantileLabel: - return mType != pdata.MetricDataTypeSummary + return mType != pmetric.MetricDataTypeSummary } return true } -func getBoundaryPdata(metricType pdata.MetricDataType, labels labels.Labels) (float64, error) { +func getBoundaryPdata(metricType pmetric.MetricDataType, labels labels.Labels) (float64, error) { labelName := "" switch metricType { - case pdata.MetricDataTypeHistogram: + case pmetric.MetricDataTypeHistogram: labelName = model.BucketLabel - case pdata.MetricDataTypeSummary: + case pmetric.MetricDataTypeSummary: labelName = model.QuantileLabel default: return 0, errNoBoundaryLabel @@ -58,30 +58,30 @@ func getBoundaryPdata(metricType pdata.MetricDataType, labels labels.Labels) (fl return strconv.ParseFloat(v, 64) } -func convToPdataMetricType(metricType textparse.MetricType) pdata.MetricDataType { +func convToPdataMetricType(metricType textparse.MetricType) pmetric.MetricDataType { switch metricType { case textparse.MetricTypeCounter: // always use float64, as it's the internal data type used in prometheus - return pdata.MetricDataTypeSum + return pmetric.MetricDataTypeSum // textparse.MetricTypeUnknown is converted to gauge by default to prevent Prometheus untyped metrics from being dropped case textparse.MetricTypeGauge, textparse.MetricTypeUnknown: - return pdata.MetricDataTypeGauge + return pmetric.MetricDataTypeGauge case textparse.MetricTypeHistogram: - return pdata.MetricDataTypeHistogram + return pmetric.MetricDataTypeHistogram // dropping support for gaugehistogram for now until we have an official spec of its implementation // a draft can be found in: https://docs.google.com/document/d/1KwV0mAXwwbvvifBvDKH_LU1YjyXE_wxCkHNoCGq1GX0/edit#heading=h.1cvzqd4ksd23 // case textparse.MetricTypeGaugeHistogram: // return case textparse.MetricTypeSummary: - return pdata.MetricDataTypeSummary + return pmetric.MetricDataTypeSummary default: // including: textparse.MetricTypeGaugeHistogram, textparse.MetricTypeInfo, textparse.MetricTypeStateset - return pdata.MetricDataTypeNone + return pmetric.MetricDataTypeNone } } type metricBuilderPdata struct { - metrics pdata.MetricSlice + metrics pmetric.MetricSlice families map[string]*metricFamilyPdata hasData bool hasInternalMetric bool @@ -96,7 +96,7 @@ type metricBuilderPdata struct { } // newMetricBuilder creates a MetricBuilder which is allowed to feed all the datapoints from a single prometheus -// scraped page by calling its AddDataPoint function, and turn them into a pdata.Metrics object. +// scraped page by calling its AddDataPoint function, and turn them into a pmetric.Metrics object. 
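The conversion above is a straight table from Prometheus text-parse types to the new enum. Shown here in usage form; the function body is reproduced from the hunk only to make the snippet self-contained:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/textparse"
	"go.opentelemetry.io/collector/pdata/pmetric"
)

// convToPdataMetricType mirrors the mapping in otlp_metricsbuilder.go.
func convToPdataMetricType(t textparse.MetricType) pmetric.MetricDataType {
	switch t {
	case textparse.MetricTypeCounter:
		return pmetric.MetricDataTypeSum
	case textparse.MetricTypeGauge, textparse.MetricTypeUnknown:
		return pmetric.MetricDataTypeGauge
	case textparse.MetricTypeHistogram:
		return pmetric.MetricDataTypeHistogram
	case textparse.MetricTypeSummary:
		return pmetric.MetricDataTypeSummary
	default: // info, stateset, gaugehistogram, ...
		return pmetric.MetricDataTypeNone
	}
}

func main() {
	fmt.Println(convToPdataMetricType(textparse.MetricTypeCounter) == pmetric.MetricDataTypeSum) // true
}
```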
// by calling its Build function func newMetricBuilderPdata(mc MetadataCache, useStartTimeMetric bool, startTimeMetricRegex string, logger *zap.Logger, intervalStartTimeMs int64) *metricBuilderPdata { var regex *regexp.Regexp @@ -104,7 +104,7 @@ func newMetricBuilderPdata(mc MetadataCache, useStartTimeMetric bool, startTimeM regex, _ = regexp.Compile(startTimeMetricRegex) } return &metricBuilderPdata{ - metrics: pdata.NewMetricSlice(), + metrics: pmetric.NewMetricSlice(), families: map[string]*metricFamilyPdata{}, mc: mc, logger: logger, @@ -186,12 +186,12 @@ func (b *metricBuilderPdata) AddDataPoint(ls labels.Labels, t int64, v float64) return curMF.Add(metricName, ls, t, v) } -// Build an pdata.MetricSlice based on all added data complexValue. +// Build an pmetric.MetricSlice based on all added data complexValue. // The only error returned by this function is errNoDataToBuild. -func (b *metricBuilderPdata) Build() (*pdata.MetricSlice, int, int, error) { +func (b *metricBuilderPdata) Build() (*pmetric.MetricSlice, int, int, error) { if !b.hasData { if b.hasInternalMetric { - metricsL := pdata.NewMetricSlice() + metricsL := pmetric.NewMetricSlice() return &metricsL, 0, 0, nil } return nil, 0, 0, errNoDataToBuild diff --git a/receiver/prometheusreceiver/internal/otlp_metricsbuilder_test.go b/receiver/prometheusreceiver/internal/otlp_metricsbuilder_test.go index 692b47cf6deb..e5ce2afc8d73 100644 --- a/receiver/prometheusreceiver/internal/otlp_metricsbuilder_test.go +++ b/receiver/prometheusreceiver/internal/otlp_metricsbuilder_test.go @@ -22,7 +22,8 @@ import ( "github.com/prometheus/prometheus/model/textparse" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" ) @@ -107,14 +108,14 @@ func Test_startTimeMetricMatch_pdata(t *testing.T) { func TestGetBoundaryPdata(t *testing.T) { tests := []struct { name string - mtype pdata.MetricDataType + mtype pmetric.MetricDataType labels labels.Labels wantValue float64 wantErr string }{ { name: "cumulative histogram with bucket label", - mtype: pdata.MetricDataTypeHistogram, + mtype: pmetric.MetricDataTypeHistogram, labels: labels.Labels{ {Name: model.BucketLabel, Value: "0.256"}, }, @@ -122,7 +123,7 @@ func TestGetBoundaryPdata(t *testing.T) { }, { name: "gauge histogram with bucket label", - mtype: pdata.MetricDataTypeHistogram, + mtype: pmetric.MetricDataTypeHistogram, labels: labels.Labels{ {Name: model.BucketLabel, Value: "11.71"}, }, @@ -130,7 +131,7 @@ func TestGetBoundaryPdata(t *testing.T) { }, { name: "summary with bucket label", - mtype: pdata.MetricDataTypeSummary, + mtype: pmetric.MetricDataTypeSummary, labels: labels.Labels{ {Name: model.BucketLabel, Value: "11.71"}, }, @@ -138,7 +139,7 @@ func TestGetBoundaryPdata(t *testing.T) { }, { name: "summary with quantile label", - mtype: pdata.MetricDataTypeSummary, + mtype: pmetric.MetricDataTypeSummary, labels: labels.Labels{ {Name: model.QuantileLabel, Value: "92.88"}, }, @@ -146,7 +147,7 @@ func TestGetBoundaryPdata(t *testing.T) { }, { name: "gauge histogram mismatched with bucket label", - mtype: pdata.MetricDataTypeSummary, + mtype: pmetric.MetricDataTypeSummary, labels: labels.Labels{ {Name: model.BucketLabel, Value: "11.71"}, }, @@ -154,7 +155,7 @@ func TestGetBoundaryPdata(t *testing.T) { }, { name: "other data types without matches", - mtype: pdata.MetricDataTypeGauge, + mtype: 
pmetric.MetricDataTypeGauge, labels: labels.Labels{ {Name: model.BucketLabel, Value: "11.71"}, }, @@ -182,42 +183,42 @@ func TestConvToPdataMetricType(t *testing.T) { tests := []struct { name string mtype textparse.MetricType - want pdata.MetricDataType + want pmetric.MetricDataType }{ { name: "textparse.counter", mtype: textparse.MetricTypeCounter, - want: pdata.MetricDataTypeSum, + want: pmetric.MetricDataTypeSum, }, { name: "textparse.gauge", mtype: textparse.MetricTypeGauge, - want: pdata.MetricDataTypeGauge, + want: pmetric.MetricDataTypeGauge, }, { name: "textparse.unknown", mtype: textparse.MetricTypeUnknown, - want: pdata.MetricDataTypeGauge, + want: pmetric.MetricDataTypeGauge, }, { name: "textparse.histogram", mtype: textparse.MetricTypeHistogram, - want: pdata.MetricDataTypeHistogram, + want: pmetric.MetricDataTypeHistogram, }, { name: "textparse.summary", mtype: textparse.MetricTypeSummary, - want: pdata.MetricDataTypeSummary, + want: pmetric.MetricDataTypeSummary, }, { name: "textparse.metric_type_info", mtype: textparse.MetricTypeInfo, - want: pdata.MetricDataTypeNone, + want: pmetric.MetricDataTypeNone, }, { name: "textparse.metric_state_set", mtype: textparse.MetricTypeStateset, - want: pdata.MetricDataTypeNone, + want: pmetric.MetricDataTypeNone, }, } @@ -233,7 +234,7 @@ func TestConvToPdataMetricType(t *testing.T) { func TestIsUsefulLabelPdata(t *testing.T) { tests := []struct { name string - mtypes []pdata.MetricDataType + mtypes []pmetric.MetricDataType labelKeys []string want bool }{ @@ -242,36 +243,36 @@ func TestIsUsefulLabelPdata(t *testing.T) { labelKeys: []string{ model.MetricNameLabel, model.InstanceLabel, model.SchemeLabel, model.MetricsPathLabel, model.JobLabel, }, - mtypes: []pdata.MetricDataType{ - pdata.MetricDataTypeSum, - pdata.MetricDataTypeGauge, - pdata.MetricDataTypeHistogram, - pdata.MetricDataTypeSummary, - pdata.MetricDataTypeSum, - pdata.MetricDataTypeNone, - pdata.MetricDataTypeGauge, - pdata.MetricDataTypeSum, + mtypes: []pmetric.MetricDataType{ + pmetric.MetricDataTypeSum, + pmetric.MetricDataTypeGauge, + pmetric.MetricDataTypeHistogram, + pmetric.MetricDataTypeSummary, + pmetric.MetricDataTypeSum, + pmetric.MetricDataTypeNone, + pmetric.MetricDataTypeGauge, + pmetric.MetricDataTypeSum, }, want: false, }, { name: `bucket label with non "int_histogram", "histogram":: useful`, - mtypes: []pdata.MetricDataType{ - pdata.MetricDataTypeSum, - pdata.MetricDataTypeGauge, - pdata.MetricDataTypeSummary, - pdata.MetricDataTypeSum, - pdata.MetricDataTypeNone, - pdata.MetricDataTypeGauge, - pdata.MetricDataTypeSum, + mtypes: []pmetric.MetricDataType{ + pmetric.MetricDataTypeSum, + pmetric.MetricDataTypeGauge, + pmetric.MetricDataTypeSummary, + pmetric.MetricDataTypeSum, + pmetric.MetricDataTypeNone, + pmetric.MetricDataTypeGauge, + pmetric.MetricDataTypeSum, }, labelKeys: []string{model.BucketLabel}, want: true, }, { name: `quantile label with "summary": non-useful`, - mtypes: []pdata.MetricDataType{ - pdata.MetricDataTypeSummary, + mtypes: []pmetric.MetricDataType{ + pmetric.MetricDataTypeSummary, }, labelKeys: []string{model.QuantileLabel}, want: false, @@ -279,29 +280,29 @@ func TestIsUsefulLabelPdata(t *testing.T) { { name: `quantile label with non-"summary": useful`, labelKeys: []string{model.QuantileLabel}, - mtypes: []pdata.MetricDataType{ - pdata.MetricDataTypeSum, - pdata.MetricDataTypeGauge, - pdata.MetricDataTypeHistogram, - pdata.MetricDataTypeSum, - pdata.MetricDataTypeNone, - pdata.MetricDataTypeGauge, - pdata.MetricDataTypeSum, + mtypes: 
[]pmetric.MetricDataType{ + pmetric.MetricDataTypeSum, + pmetric.MetricDataTypeGauge, + pmetric.MetricDataTypeHistogram, + pmetric.MetricDataTypeSum, + pmetric.MetricDataTypeNone, + pmetric.MetricDataTypeGauge, + pmetric.MetricDataTypeSum, }, want: true, }, { name: `any other label with any type:: useful`, labelKeys: []string{"any_label", "foo.bar"}, - mtypes: []pdata.MetricDataType{ - pdata.MetricDataTypeSum, - pdata.MetricDataTypeGauge, - pdata.MetricDataTypeHistogram, - pdata.MetricDataTypeSummary, - pdata.MetricDataTypeSum, - pdata.MetricDataTypeNone, - pdata.MetricDataTypeGauge, - pdata.MetricDataTypeSum, + mtypes: []pmetric.MetricDataType{ + pmetric.MetricDataTypeSum, + pmetric.MetricDataTypeGauge, + pmetric.MetricDataTypeHistogram, + pmetric.MetricDataTypeSummary, + pmetric.MetricDataTypeSum, + pmetric.MetricDataTypeNone, + pmetric.MetricDataTypeGauge, + pmetric.MetricDataTypeSum, }, want: true, }, @@ -323,7 +324,7 @@ func TestIsUsefulLabelPdata(t *testing.T) { type buildTestDataPdata struct { name string inputs []*testScrapedPage - wants func() []*pdata.MetricSlice + wants func() []*pmetric.MetricSlice } func Test_OTLPMetricBuilder_counters(t *testing.T) { @@ -337,13 +338,13 @@ func Test_OTLPMetricBuilder_counters(t *testing.T) { }, }, }, - wants: func() []*pdata.MetricSlice { - mL := pdata.NewMetricSlice() + wants: func() []*pmetric.MetricSlice { + mL := pmetric.NewMetricSlice() m0 := mL.AppendEmpty() m0.SetName("counter_test") - m0.SetDataType(pdata.MetricDataTypeSum) + m0.SetDataType(pmetric.MetricDataTypeSum) sum := m0.Sum() - sum.SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + sum.SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) sum.SetIsMonotonic(true) pt0 := sum.DataPoints().AppendEmpty() pt0.SetDoubleVal(100.0) @@ -351,7 +352,7 @@ func Test_OTLPMetricBuilder_counters(t *testing.T) { pt0.SetTimestamp(startTsNanos) pt0.Attributes().InsertString("foo", "bar") - return []*pdata.MetricSlice{&mL} + return []*pmetric.MetricSlice{&mL} }, }, { @@ -364,13 +365,13 @@ func Test_OTLPMetricBuilder_counters(t *testing.T) { }, }, }, - wants: func() []*pdata.MetricSlice { - mL := pdata.NewMetricSlice() + wants: func() []*pmetric.MetricSlice { + mL := pmetric.NewMetricSlice() m0 := mL.AppendEmpty() m0.SetName("counter_test") - m0.SetDataType(pdata.MetricDataTypeSum) + m0.SetDataType(pmetric.MetricDataTypeSum) sum := m0.Sum() - sum.SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + sum.SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) sum.SetIsMonotonic(true) pt0 := sum.DataPoints().AppendEmpty() pt0.SetDoubleVal(150.0) @@ -384,7 +385,7 @@ func Test_OTLPMetricBuilder_counters(t *testing.T) { pt1.SetTimestamp(startTsNanos) pt1.Attributes().InsertString("foo", "other") - return []*pdata.MetricSlice{&mL} + return []*pmetric.MetricSlice{&mL} }, }, { @@ -398,13 +399,13 @@ func Test_OTLPMetricBuilder_counters(t *testing.T) { }, }, }, - wants: func() []*pdata.MetricSlice { - mL0 := pdata.NewMetricSlice() + wants: func() []*pmetric.MetricSlice { + mL0 := pmetric.NewMetricSlice() m0 := mL0.AppendEmpty() m0.SetName("counter_test") - m0.SetDataType(pdata.MetricDataTypeSum) + m0.SetDataType(pmetric.MetricDataTypeSum) sum0 := m0.Sum() - sum0.SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + sum0.SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) sum0.SetIsMonotonic(true) pt0 := sum0.DataPoints().AppendEmpty() pt0.SetDoubleVal(150.0) @@ -420,9 +421,9 @@ func 
Test_OTLPMetricBuilder_counters(t *testing.T) { m1 := mL0.AppendEmpty() m1.SetName("counter_test2") - m1.SetDataType(pdata.MetricDataTypeSum) + m1.SetDataType(pmetric.MetricDataTypeSum) sum1 := m1.Sum() - sum1.SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + sum1.SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) sum1.SetIsMonotonic(true) pt2 := sum1.DataPoints().AppendEmpty() pt2.SetDoubleVal(100.0) @@ -430,7 +431,7 @@ func Test_OTLPMetricBuilder_counters(t *testing.T) { pt2.SetTimestamp(startTsNanos) pt2.Attributes().InsertString("foo", "bar") - return []*pdata.MetricSlice{&mL0} + return []*pmetric.MetricSlice{&mL0} }, }, { @@ -442,13 +443,13 @@ func Test_OTLPMetricBuilder_counters(t *testing.T) { }, }, }, - wants: func() []*pdata.MetricSlice { - mL := pdata.NewMetricSlice() + wants: func() []*pmetric.MetricSlice { + mL := pmetric.NewMetricSlice() m0 := mL.AppendEmpty() m0.SetName("poor_name_count") - m0.SetDataType(pdata.MetricDataTypeSum) + m0.SetDataType(pmetric.MetricDataTypeSum) sum := m0.Sum() - sum.SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + sum.SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) sum.SetIsMonotonic(true) pt0 := sum.DataPoints().AppendEmpty() pt0.SetDoubleVal(100.0) @@ -456,7 +457,7 @@ func Test_OTLPMetricBuilder_counters(t *testing.T) { pt0.SetTimestamp(startTsNanos) pt0.Attributes().InsertString("foo", "bar") - return []*pdata.MetricSlice{&mL} + return []*pmetric.MetricSlice{&mL} }, }, } @@ -489,12 +490,12 @@ func runBuilderTestsPdata(t *testing.T, tests []buildTestDataPdata) { } } -func assertEquivalentMetricsPdata(t *testing.T, want, got *pdata.MetricSlice) { +func assertEquivalentMetricsPdata(t *testing.T, want, got *pmetric.MetricSlice) { if !assert.Equal(t, want.Len(), got.Len()) { return } - wmap := map[string]pdata.Metric{} - gmap := map[string]pdata.Metric{} + wmap := map[string]pmetric.Metric{} + gmap := map[string]pmetric.Metric{} for i := 0; i < want.Len(); i++ { wi := want.At(i) @@ -507,8 +508,8 @@ func assertEquivalentMetricsPdata(t *testing.T, want, got *pdata.MetricSlice) { } var ( - startTsNanos = pdata.Timestamp(startTs * 1e6) - startTsPlusIntervalNanos = pdata.Timestamp((startTs + interval) * 1e6) + startTsNanos = pcommon.Timestamp(startTs * 1e6) + startTsPlusIntervalNanos = pcommon.Timestamp((startTs + interval) * 1e6) ) func Test_OTLPMetricBuilder_gauges(t *testing.T) { @@ -527,11 +528,11 @@ func Test_OTLPMetricBuilder_gauges(t *testing.T) { }, }, }, - wants: func() []*pdata.MetricSlice { - mL0 := pdata.NewMetricSlice() + wants: func() []*pmetric.MetricSlice { + mL0 := pmetric.NewMetricSlice() m0 := mL0.AppendEmpty() m0.SetName("gauge_test") - m0.SetDataType(pdata.MetricDataTypeGauge) + m0.SetDataType(pmetric.MetricDataTypeGauge) gauge0 := m0.Gauge() pt0 := gauge0.DataPoints().AppendEmpty() pt0.SetDoubleVal(100.0) @@ -539,10 +540,10 @@ func Test_OTLPMetricBuilder_gauges(t *testing.T) { pt0.SetTimestamp(startTsNanos) pt0.Attributes().InsertString("foo", "bar") - mL1 := pdata.NewMetricSlice() + mL1 := pmetric.NewMetricSlice() m1 := mL1.AppendEmpty() m1.SetName("gauge_test") - m1.SetDataType(pdata.MetricDataTypeGauge) + m1.SetDataType(pmetric.MetricDataTypeGauge) gauge1 := m1.Gauge() pt1 := gauge1.DataPoints().AppendEmpty() pt1.SetDoubleVal(90.0) @@ -550,7 +551,7 @@ func Test_OTLPMetricBuilder_gauges(t *testing.T) { pt1.SetTimestamp(startTsPlusIntervalNanos) pt1.Attributes().InsertString("foo", "bar") - return []*pdata.MetricSlice{&mL0, &mL1} + 
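The `startTsNanos` constants above convert Prometheus millisecond scrape timestamps with `* 1e6`. A short check that this matches the `time.Time`-based construction; the literal timestamp below is made up, and the `time.UnixMilli` form assumes the module builds with Go 1.17 or newer:

```go
package main

import (
	"fmt"
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
)

func main() {
	const startTs = int64(1555366608340) // an illustrative Prometheus timestamp in ms

	// The test constants multiply by 1e6 to go from milliseconds to nanoseconds.
	a := pcommon.Timestamp(startTs * 1e6)
	// Equivalent construction via time.Time.
	b := pcommon.NewTimestampFromTime(time.UnixMilli(startTs))

	fmt.Println(a == b) // true
}
```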
return []*pmetric.MetricSlice{&mL0, &mL1} }, }, { @@ -563,11 +564,11 @@ func Test_OTLPMetricBuilder_gauges(t *testing.T) { }, }, }, - wants: func() []*pdata.MetricSlice { - mL0 := pdata.NewMetricSlice() + wants: func() []*pmetric.MetricSlice { + mL0 := pmetric.NewMetricSlice() m0 := mL0.AppendEmpty() m0.SetName("gauge_test") - m0.SetDataType(pdata.MetricDataTypeGauge) + m0.SetDataType(pmetric.MetricDataTypeGauge) gauge0 := m0.Gauge() pt0 := gauge0.DataPoints().AppendEmpty() pt0.SetDoubleVal(100.0) @@ -581,7 +582,7 @@ func Test_OTLPMetricBuilder_gauges(t *testing.T) { pt1.SetTimestamp(startTsNanos) pt1.Attributes().InsertString("bar", "foo") - return []*pdata.MetricSlice{&mL0} + return []*pmetric.MetricSlice{&mL0} }, }, { @@ -601,11 +602,11 @@ func Test_OTLPMetricBuilder_gauges(t *testing.T) { }, }, }, - wants: func() []*pdata.MetricSlice { - mL0 := pdata.NewMetricSlice() + wants: func() []*pmetric.MetricSlice { + mL0 := pmetric.NewMetricSlice() m0 := mL0.AppendEmpty() m0.SetName("gauge_test") - m0.SetDataType(pdata.MetricDataTypeGauge) + m0.SetDataType(pmetric.MetricDataTypeGauge) gauge0 := m0.Gauge() pt0 := gauge0.DataPoints().AppendEmpty() pt0.SetDoubleVal(100.0) @@ -619,10 +620,10 @@ func Test_OTLPMetricBuilder_gauges(t *testing.T) { pt1.SetTimestamp(startTsNanos) pt1.Attributes().InsertString("bar", "foo") - mL1 := pdata.NewMetricSlice() + mL1 := pmetric.NewMetricSlice() m1 := mL1.AppendEmpty() m1.SetName("gauge_test") - m1.SetDataType(pdata.MetricDataTypeGauge) + m1.SetDataType(pmetric.MetricDataTypeGauge) gauge1 := m1.Gauge() pt2 := gauge1.DataPoints().AppendEmpty() pt2.SetDoubleVal(20.0) @@ -630,7 +631,7 @@ func Test_OTLPMetricBuilder_gauges(t *testing.T) { pt2.SetTimestamp(startTsPlusIntervalNanos) pt2.Attributes().InsertString("foo", "bar") - return []*pdata.MetricSlice{&mL0, &mL1} + return []*pmetric.MetricSlice{&mL0, &mL1} }, }, } @@ -649,11 +650,11 @@ func Test_OTLPMetricBuilder_untype(t *testing.T) { }, }, }, - wants: func() []*pdata.MetricSlice { - mL0 := pdata.NewMetricSlice() + wants: func() []*pmetric.MetricSlice { + mL0 := pmetric.NewMetricSlice() m0 := mL0.AppendEmpty() m0.SetName("unknown_test") - m0.SetDataType(pdata.MetricDataTypeGauge) + m0.SetDataType(pmetric.MetricDataTypeGauge) gauge0 := m0.Gauge() pt0 := gauge0.DataPoints().AppendEmpty() pt0.SetDoubleVal(100.0) @@ -661,7 +662,7 @@ func Test_OTLPMetricBuilder_untype(t *testing.T) { pt0.SetTimestamp(startTsNanos) pt0.Attributes().InsertString("foo", "bar") - return []*pdata.MetricSlice{&mL0} + return []*pmetric.MetricSlice{&mL0} }, }, { @@ -675,11 +676,11 @@ func Test_OTLPMetricBuilder_untype(t *testing.T) { }, }, }, - wants: func() []*pdata.MetricSlice { - mL0 := pdata.NewMetricSlice() + wants: func() []*pmetric.MetricSlice { + mL0 := pmetric.NewMetricSlice() m0 := mL0.AppendEmpty() m0.SetName("something_not_exists") - m0.SetDataType(pdata.MetricDataTypeGauge) + m0.SetDataType(pmetric.MetricDataTypeGauge) gauge0 := m0.Gauge() pt0 := gauge0.DataPoints().AppendEmpty() pt0.SetDoubleVal(100.0) @@ -688,7 +689,7 @@ func Test_OTLPMetricBuilder_untype(t *testing.T) { m1 := mL0.AppendEmpty() m1.SetName("theother_not_exists") - m1.SetDataType(pdata.MetricDataTypeGauge) + m1.SetDataType(pmetric.MetricDataTypeGauge) gauge1 := m1.Gauge() pt1 := gauge1.DataPoints().AppendEmpty() pt1.SetDoubleVal(200.0) @@ -700,7 +701,7 @@ func Test_OTLPMetricBuilder_untype(t *testing.T) { pt2.SetTimestamp(startTsNanos) pt2.Attributes().InsertString("bar", "foo") - return []*pdata.MetricSlice{&mL0} + return []*pmetric.MetricSlice{&mL0} }, }, { 
@@ -712,18 +713,18 @@ func Test_OTLPMetricBuilder_untype(t *testing.T) { }, }, }, - wants: func() []*pdata.MetricSlice { - mL0 := pdata.NewMetricSlice() + wants: func() []*pmetric.MetricSlice { + mL0 := pmetric.NewMetricSlice() m0 := mL0.AppendEmpty() m0.SetName("some_count") - m0.SetDataType(pdata.MetricDataTypeGauge) + m0.SetDataType(pmetric.MetricDataTypeGauge) gauge0 := m0.Gauge() pt0 := gauge0.DataPoints().AppendEmpty() pt0.SetDoubleVal(100.0) pt0.SetTimestamp(startTsNanos) pt0.Attributes().InsertString("foo", "bar") - return []*pdata.MetricSlice{&mL0} + return []*pmetric.MetricSlice{&mL0} }, }, } @@ -746,13 +747,13 @@ func Test_OTLPMetricBuilder_histogram(t *testing.T) { }, }, }, - wants: func() []*pdata.MetricSlice { - mL0 := pdata.NewMetricSlice() + wants: func() []*pmetric.MetricSlice { + mL0 := pmetric.NewMetricSlice() m0 := mL0.AppendEmpty() m0.SetName("hist_test") - m0.SetDataType(pdata.MetricDataTypeHistogram) + m0.SetDataType(pmetric.MetricDataTypeHistogram) hist0 := m0.Histogram() - hist0.SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + hist0.SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) pt0 := hist0.DataPoints().AppendEmpty() pt0.SetCount(10) pt0.SetSum(99) @@ -762,7 +763,7 @@ func Test_OTLPMetricBuilder_histogram(t *testing.T) { pt0.SetStartTimestamp(startTsNanos) pt0.Attributes().InsertString("foo", "bar") - return []*pdata.MetricSlice{&mL0} + return []*pmetric.MetricSlice{&mL0} }, }, { @@ -783,13 +784,13 @@ func Test_OTLPMetricBuilder_histogram(t *testing.T) { }, }, }, - wants: func() []*pdata.MetricSlice { - mL0 := pdata.NewMetricSlice() + wants: func() []*pmetric.MetricSlice { + mL0 := pmetric.NewMetricSlice() m0 := mL0.AppendEmpty() m0.SetName("hist_test") - m0.SetDataType(pdata.MetricDataTypeHistogram) + m0.SetDataType(pmetric.MetricDataTypeHistogram) hist0 := m0.Histogram() - hist0.SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + hist0.SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) pt0 := hist0.DataPoints().AppendEmpty() pt0.SetCount(10) pt0.SetSum(99) @@ -808,7 +809,7 @@ func Test_OTLPMetricBuilder_histogram(t *testing.T) { pt1.SetStartTimestamp(startTsNanos) pt1.Attributes().InsertString("key2", "v2") - return []*pdata.MetricSlice{&mL0} + return []*pmetric.MetricSlice{&mL0} }, }, { @@ -834,13 +835,13 @@ func Test_OTLPMetricBuilder_histogram(t *testing.T) { }, }, }, - wants: func() []*pdata.MetricSlice { - mL0 := pdata.NewMetricSlice() + wants: func() []*pmetric.MetricSlice { + mL0 := pmetric.NewMetricSlice() m0 := mL0.AppendEmpty() m0.SetName("hist_test") - m0.SetDataType(pdata.MetricDataTypeHistogram) + m0.SetDataType(pmetric.MetricDataTypeHistogram) hist0 := m0.Histogram() - hist0.SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + hist0.SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) pt0 := hist0.DataPoints().AppendEmpty() pt0.SetCount(10) pt0.SetSum(99) @@ -861,9 +862,9 @@ func Test_OTLPMetricBuilder_histogram(t *testing.T) { m1 := mL0.AppendEmpty() m1.SetName("hist_test2") - m1.SetDataType(pdata.MetricDataTypeHistogram) + m1.SetDataType(pmetric.MetricDataTypeHistogram) hist1 := m1.Histogram() - hist1.SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + hist1.SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) pt2 := hist1.DataPoints().AppendEmpty() pt2.SetCount(3) pt2.SetSum(50) @@ -872,7 +873,7 @@ func Test_OTLPMetricBuilder_histogram(t *testing.T) { 
pt2.SetTimestamp(startTsNanos) pt2.SetStartTimestamp(startTsNanos) - return []*pdata.MetricSlice{&mL0} + return []*pmetric.MetricSlice{&mL0} }, }, { @@ -888,13 +889,13 @@ func Test_OTLPMetricBuilder_histogram(t *testing.T) { }, }, }, - wants: func() []*pdata.MetricSlice { - mL0 := pdata.NewMetricSlice() + wants: func() []*pmetric.MetricSlice { + mL0 := pmetric.NewMetricSlice() m0 := mL0.AppendEmpty() m0.SetName("hist_test") - m0.SetDataType(pdata.MetricDataTypeHistogram) + m0.SetDataType(pmetric.MetricDataTypeHistogram) hist0 := m0.Histogram() - hist0.SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + hist0.SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) pt0 := hist0.DataPoints().AppendEmpty() pt0.SetCount(10) pt0.SetSum(99) @@ -904,7 +905,7 @@ func Test_OTLPMetricBuilder_histogram(t *testing.T) { pt0.SetStartTimestamp(startTsNanos) pt0.Attributes().InsertString("foo", "bar") - return []*pdata.MetricSlice{&mL0} + return []*pmetric.MetricSlice{&mL0} }, }, { @@ -919,13 +920,13 @@ func Test_OTLPMetricBuilder_histogram(t *testing.T) { }, }, }, - wants: func() []*pdata.MetricSlice { - mL0 := pdata.NewMetricSlice() + wants: func() []*pmetric.MetricSlice { + mL0 := pmetric.NewMetricSlice() m0 := mL0.AppendEmpty() m0.SetName("hist_test") - m0.SetDataType(pdata.MetricDataTypeHistogram) + m0.SetDataType(pmetric.MetricDataTypeHistogram) hist0 := m0.Histogram() - hist0.SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + hist0.SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) pt0 := hist0.DataPoints().AppendEmpty() pt0.SetCount(3) pt0.SetSum(100) @@ -934,7 +935,7 @@ func Test_OTLPMetricBuilder_histogram(t *testing.T) { pt0.SetTimestamp(startTsNanos) pt0.SetStartTimestamp(startTsNanos) - return []*pdata.MetricSlice{&mL0} + return []*pmetric.MetricSlice{&mL0} }, }, { @@ -949,13 +950,13 @@ func Test_OTLPMetricBuilder_histogram(t *testing.T) { }, }, }, - wants: func() []*pdata.MetricSlice { - mL0 := pdata.NewMetricSlice() + wants: func() []*pmetric.MetricSlice { + mL0 := pmetric.NewMetricSlice() m0 := mL0.AppendEmpty() m0.SetName("hist_test") - m0.SetDataType(pdata.MetricDataTypeHistogram) + m0.SetDataType(pmetric.MetricDataTypeHistogram) hist0 := m0.Histogram() - hist0.SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + hist0.SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) pt0 := hist0.DataPoints().AppendEmpty() pt0.SetCount(3) pt0.SetSum(100) @@ -964,7 +965,7 @@ func Test_OTLPMetricBuilder_histogram(t *testing.T) { pt0.SetTimestamp(startTsNanos) pt0.SetStartTimestamp(startTsNanos) - return []*pdata.MetricSlice{&mL0} + return []*pmetric.MetricSlice{&mL0} }, }, { @@ -979,13 +980,13 @@ func Test_OTLPMetricBuilder_histogram(t *testing.T) { }, }, }, - wants: func() []*pdata.MetricSlice { - mL0 := pdata.NewMetricSlice() + wants: func() []*pmetric.MetricSlice { + mL0 := pmetric.NewMetricSlice() m0 := mL0.AppendEmpty() m0.SetName("hist_test") - m0.SetDataType(pdata.MetricDataTypeHistogram) + m0.SetDataType(pmetric.MetricDataTypeHistogram) hist0 := m0.Histogram() - hist0.SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + hist0.SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) pt0 := hist0.DataPoints().AppendEmpty() pt0.SetCount(3) pt0.SetSum(0) @@ -995,7 +996,7 @@ func Test_OTLPMetricBuilder_histogram(t *testing.T) { pt0.SetStartTimestamp(startTsNanos) pt0.Attributes().InsertString("foo", "bar") - return 
[]*pdata.MetricSlice{&mL0} + return []*pmetric.MetricSlice{&mL0} }, }, { @@ -1008,9 +1009,9 @@ func Test_OTLPMetricBuilder_histogram(t *testing.T) { }, }, }, - wants: func() []*pdata.MetricSlice { - mL0 := pdata.NewMetricSlice() - return []*pdata.MetricSlice{&mL0} + wants: func() []*pmetric.MetricSlice { + mL0 := pmetric.NewMetricSlice() + return []*pmetric.MetricSlice{&mL0} }, }, { @@ -1025,9 +1026,9 @@ func Test_OTLPMetricBuilder_histogram(t *testing.T) { }, }, }, - wants: func() []*pdata.MetricSlice { - mL0 := pdata.NewMetricSlice() - return []*pdata.MetricSlice{&mL0} + wants: func() []*pmetric.MetricSlice { + mL0 := pmetric.NewMetricSlice() + return []*pmetric.MetricSlice{&mL0} }, }, } @@ -1046,9 +1047,9 @@ func Test_OTLPMetricBuilder_summary(t *testing.T) { }, }, }, - wants: func() []*pdata.MetricSlice { - mL0 := pdata.NewMetricSlice() - return []*pdata.MetricSlice{&mL0} + wants: func() []*pmetric.MetricSlice { + mL0 := pmetric.NewMetricSlice() + return []*pmetric.MetricSlice{&mL0} }, }, { @@ -1063,9 +1064,9 @@ func Test_OTLPMetricBuilder_summary(t *testing.T) { }, }, }, - wants: func() []*pdata.MetricSlice { - mL0 := pdata.NewMetricSlice() - return []*pdata.MetricSlice{&mL0} + wants: func() []*pmetric.MetricSlice { + mL0 := pmetric.NewMetricSlice() + return []*pmetric.MetricSlice{&mL0} }, }, { @@ -1080,11 +1081,11 @@ func Test_OTLPMetricBuilder_summary(t *testing.T) { }, }, }, - wants: func() []*pdata.MetricSlice { - mL0 := pdata.NewMetricSlice() + wants: func() []*pmetric.MetricSlice { + mL0 := pmetric.NewMetricSlice() m0 := mL0.AppendEmpty() m0.SetName("summary_test") - m0.SetDataType(pdata.MetricDataTypeSummary) + m0.SetDataType(pmetric.MetricDataTypeSummary) sum0 := m0.Summary() pt0 := sum0.DataPoints().AppendEmpty() pt0.SetTimestamp(startTsNanos) @@ -1103,7 +1104,7 @@ func Test_OTLPMetricBuilder_summary(t *testing.T) { q100.SetQuantile(1) q100.SetValue(5.0) - return []*pdata.MetricSlice{&mL0} + return []*pmetric.MetricSlice{&mL0} }, }, { @@ -1116,11 +1117,11 @@ func Test_OTLPMetricBuilder_summary(t *testing.T) { }, }, }, - wants: func() []*pdata.MetricSlice { - mL0 := pdata.NewMetricSlice() + wants: func() []*pmetric.MetricSlice { + mL0 := pmetric.NewMetricSlice() m0 := mL0.AppendEmpty() m0.SetName("summary_test") - m0.SetDataType(pdata.MetricDataTypeSummary) + m0.SetDataType(pmetric.MetricDataTypeSummary) sum0 := m0.Summary() pt0 := sum0.DataPoints().AppendEmpty() pt0.SetStartTimestamp(startTsNanos) @@ -1129,7 +1130,7 @@ func Test_OTLPMetricBuilder_summary(t *testing.T) { pt0.SetSum(100.0) pt0.Attributes().InsertString("foo", "bar") - return []*pdata.MetricSlice{&mL0} + return []*pmetric.MetricSlice{&mL0} }, }, { @@ -1145,11 +1146,11 @@ func Test_OTLPMetricBuilder_summary(t *testing.T) { }, }, }, - wants: func() []*pdata.MetricSlice { - mL0 := pdata.NewMetricSlice() + wants: func() []*pmetric.MetricSlice { + mL0 := pmetric.NewMetricSlice() m0 := mL0.AppendEmpty() m0.SetName("summary_test") - m0.SetDataType(pdata.MetricDataTypeSummary) + m0.SetDataType(pmetric.MetricDataTypeSummary) sum0 := m0.Summary() pt0 := sum0.DataPoints().AppendEmpty() pt0.SetStartTimestamp(startTsNanos) @@ -1168,7 +1169,7 @@ func Test_OTLPMetricBuilder_summary(t *testing.T) { q100.SetQuantile(1) q100.SetValue(5.0) - return []*pdata.MetricSlice{&mL0} + return []*pmetric.MetricSlice{&mL0} }, }, } diff --git a/receiver/prometheusreceiver/internal/otlp_transaction.go b/receiver/prometheusreceiver/internal/otlp_transaction.go index 95f8f00bd6c2..a0618d93a624 100644 --- 
a/receiver/prometheusreceiver/internal/otlp_transaction.go +++ b/receiver/prometheusreceiver/internal/otlp_transaction.go @@ -26,8 +26,9 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/obsreport" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" ) @@ -39,7 +40,7 @@ type transactionPdata struct { startTimeMetricRegex string sink consumer.Metrics externalLabels labels.Labels - nodeResource *pdata.Resource + nodeResource *pcommon.Resource logger *zap.Logger receiverID config.ComponentID metricBuilder *metricBuilderPdata @@ -162,35 +163,35 @@ func (t *transactionPdata) Rollback() error { return nil } -func pdataTimestampFromFloat64(ts float64) pdata.Timestamp { +func pdataTimestampFromFloat64(ts float64) pcommon.Timestamp { secs := int64(ts) nanos := int64((ts - float64(secs)) * 1e9) - return pdata.NewTimestampFromTime(time.Unix(secs, nanos)) + return pcommon.NewTimestampFromTime(time.Unix(secs, nanos)) } -func (t transactionPdata) adjustStartTimestampPdata(metricsL *pdata.MetricSlice) { +func (t transactionPdata) adjustStartTimestampPdata(metricsL *pmetric.MetricSlice) { startTimeTs := pdataTimestampFromFloat64(t.metricBuilder.startTime) for i := 0; i < metricsL.Len(); i++ { metric := metricsL.At(i) switch metric.DataType() { - case pdata.MetricDataTypeGauge: + case pmetric.MetricDataTypeGauge: continue - case pdata.MetricDataTypeSum: + case pmetric.MetricDataTypeSum: dataPoints := metric.Sum().DataPoints() for j := 0; j < dataPoints.Len(); j++ { dp := dataPoints.At(j) dp.SetStartTimestamp(startTimeTs) } - case pdata.MetricDataTypeSummary: + case pmetric.MetricDataTypeSummary: dataPoints := metric.Summary().DataPoints() for j := 0; j < dataPoints.Len(); j++ { dp := dataPoints.At(j) dp.SetStartTimestamp(startTimeTs) } - case pdata.MetricDataTypeHistogram: + case pmetric.MetricDataTypeHistogram: dataPoints := metric.Histogram().DataPoints() for j := 0; j < dataPoints.Len(); j++ { dp := dataPoints.At(j) @@ -203,8 +204,8 @@ func (t transactionPdata) adjustStartTimestampPdata(metricsL *pdata.MetricSlice) } } -func (t *transactionPdata) metricSliceToMetrics(metricsL *pdata.MetricSlice) *pdata.Metrics { - metrics := pdata.NewMetrics() +func (t *transactionPdata) metricSliceToMetrics(metricsL *pmetric.MetricSlice) *pmetric.Metrics { + metrics := pmetric.NewMetrics() rms := metrics.ResourceMetrics().AppendEmpty() ilm := rms.ScopeMetrics().AppendEmpty() metricsL.CopyTo(ilm.Metrics()) diff --git a/receiver/prometheusreceiver/internal/prom_to_otlp.go b/receiver/prometheusreceiver/internal/prom_to_otlp.go index 9797c71f1f19..e4cbf9503347 100644 --- a/receiver/prometheusreceiver/internal/prom_to_otlp.go +++ b/receiver/prometheusreceiver/internal/prom_to_otlp.go @@ -17,8 +17,8 @@ package internal // import "github.com/open-telemetry/opentelemetry-collector-co import ( "net" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" ) // isDiscernibleHost checks if a host can be used as a value for the 'host.name' key. @@ -41,12 +41,12 @@ func isDiscernibleHost(host string) bool { } // CreateNodeAndResourcePdata creates the resource data added to OTLP payloads. 
-func CreateNodeAndResourcePdata(job, instance, scheme string) *pdata.Resource { +func CreateNodeAndResourcePdata(job, instance, scheme string) *pcommon.Resource { host, port, err := net.SplitHostPort(instance) if err != nil { host = instance } - resource := pdata.NewResource() + resource := pcommon.NewResource() attrs := resource.Attributes() attrs.UpsertString(conventions.AttributeServiceName, job) if isDiscernibleHost(host) { diff --git a/receiver/prometheusreceiver/internal/prom_to_otlp_test.go b/receiver/prometheusreceiver/internal/prom_to_otlp_test.go index a9ec5dce0165..f08ef24454fa 100644 --- a/receiver/prometheusreceiver/internal/prom_to_otlp_test.go +++ b/receiver/prometheusreceiver/internal/prom_to_otlp_test.go @@ -18,15 +18,15 @@ import ( "testing" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" ) type jobInstanceDefinition struct { job, instance, host, scheme, port string } -func makeResourceWithJobInstanceScheme(def *jobInstanceDefinition, hasHost bool) *pdata.Resource { - resource := pdata.NewResource() +func makeResourceWithJobInstanceScheme(def *jobInstanceDefinition, hasHost bool) *pcommon.Resource { + resource := pcommon.NewResource() attrs := resource.Attributes() // Using hardcoded values to assert on outward expectations so that // when variables change, these tests will fail and we'll have reports. @@ -45,7 +45,7 @@ func TestCreateNodeAndResourcePromToOTLP(t *testing.T) { name, job string instance string scheme string - want *pdata.Resource + want *pcommon.Resource }{ { name: "all attributes proper", diff --git a/receiver/prometheusreceiver/internal/staleness_end_to_end_test.go b/receiver/prometheusreceiver/internal/staleness_end_to_end_test.go index 070b69ffac3c..db9f23d24887 100644 --- a/receiver/prometheusreceiver/internal/staleness_end_to_end_test.go +++ b/receiver/prometheusreceiver/internal/staleness_end_to_end_test.go @@ -158,8 +158,9 @@ service: } appSettings := service.CollectorSettings{ - Factories: factories, - ConfigProvider: service.MustNewDefaultConfigProvider([]string{confFile.Name()}, nil), + Factories: factories, + // TODO: Replace with NewConfigProvider + ConfigProvider: service.MustNewDefaultConfigProvider([]string{confFile.Name()}, nil), // nolint:staticcheck BuildInfo: component.BuildInfo{ Command: "otelcol", Description: "OpenTelemetry Collector", diff --git a/receiver/prometheusreceiver/metrics_receiver_helper_test.go b/receiver/prometheusreceiver/metrics_receiver_helper_test.go index 2c18c3b24252..0ebd4ffe9dd5 100644 --- a/receiver/prometheusreceiver/metrics_receiver_helper_test.go +++ b/receiver/prometheusreceiver/metrics_receiver_helper_test.go @@ -35,7 +35,8 @@ import ( "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/consumer/consumertest" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "gopkg.in/yaml.v2" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver/internal" @@ -115,9 +116,9 @@ type testData struct { name string relabeledJob string // Used when relabeling or honor_labels changes the target to something other than 'name'. 
pages []mockPrometheusResponse - attributes pdata.Map + attributes pcommon.Map validateScrapes bool - validateFunc func(t *testing.T, td *testData, result []*pdata.ResourceMetrics) + validateFunc func(t *testing.T, td *testData, result []*pmetric.ResourceMetrics) } // setupMockPrometheus to create a mocked prometheus based on targets, returning the server and a prometheus exporting @@ -158,7 +159,7 @@ func setupMockPrometheus(tds ...*testData) (*mockPrometheus, *promcfg.Config, er return mp, pCfg, err } -func verifyNumValidScrapeResults(t *testing.T, td *testData, resourceMetrics []*pdata.ResourceMetrics) { +func verifyNumValidScrapeResults(t *testing.T, td *testData, resourceMetrics []*pmetric.ResourceMetrics) { want := 0 for _, p := range td.pages { if p.code == 200 { @@ -168,7 +169,7 @@ func verifyNumValidScrapeResults(t *testing.T, td *testData, resourceMetrics []* require.LessOrEqual(t, want, len(resourceMetrics), "want at least %d valid scrapes, but got %d", want, len(resourceMetrics)) } -func verifyNumTotalScrapeResults(t *testing.T, td *testData, resourceMetrics []*pdata.ResourceMetrics) { +func verifyNumTotalScrapeResults(t *testing.T, td *testData, resourceMetrics []*pmetric.ResourceMetrics) { want := 0 for _, p := range td.pages { if p.code == 200 || p.code == 500 { @@ -178,8 +179,8 @@ func verifyNumTotalScrapeResults(t *testing.T, td *testData, resourceMetrics []* require.LessOrEqual(t, want, len(resourceMetrics), "want at least %d total scrapes, but got %d", want, len(resourceMetrics)) } -func getMetrics(rm *pdata.ResourceMetrics) []*pdata.Metric { - metrics := make([]*pdata.Metric, 0) +func getMetrics(rm *pmetric.ResourceMetrics) []*pmetric.Metric { + metrics := make([]*pmetric.Metric, 0) ilms := rm.ScopeMetrics() for j := 0; j < ilms.Len(); j++ { metricSlice := ilms.At(j).Metrics() @@ -191,7 +192,7 @@ func getMetrics(rm *pdata.ResourceMetrics) []*pdata.Metric { return metrics } -func metricsCount(resourceMetric *pdata.ResourceMetrics) int { +func metricsCount(resourceMetric *pmetric.ResourceMetrics) int { metricsCount := 0 ilms := resourceMetric.ScopeMetrics() for j := 0; j < ilms.Len(); j++ { @@ -201,8 +202,8 @@ func metricsCount(resourceMetric *pdata.ResourceMetrics) int { return metricsCount } -func getValidScrapes(t *testing.T, rms []*pdata.ResourceMetrics) []*pdata.ResourceMetrics { - out := make([]*pdata.ResourceMetrics, 0) +func getValidScrapes(t *testing.T, rms []*pmetric.ResourceMetrics) []*pmetric.ResourceMetrics { + out := make([]*pmetric.ResourceMetrics, 0) // rms will include failed scrapes and scrapes that received no metrics but have internal scrape metrics, filter those out for i := 0; i < len(rms); i++ { allMetrics := getMetrics(rms[i]) @@ -219,7 +220,7 @@ func getValidScrapes(t *testing.T, rms []*pdata.ResourceMetrics) []*pdata.Resour return out } -func isFirstFailedScrape(metrics []*pdata.Metric) bool { +func isFirstFailedScrape(metrics []*pmetric.Metric) bool { for _, m := range metrics { if m.Name() == "up" { if m.Gauge().DataPoints().At(0).DoubleVal() == 1 { // assumed up will not have multiple datapoints @@ -234,27 +235,27 @@ func isFirstFailedScrape(metrics []*pdata.Metric) bool { continue } switch m.DataType() { - case pdata.MetricDataTypeGauge: + case pmetric.MetricDataTypeGauge: for i := 0; i < m.Gauge().DataPoints().Len(); i++ { - if !m.Gauge().DataPoints().At(i).Flags().HasFlag(pdata.MetricDataPointFlagNoRecordedValue) { + if !m.Gauge().DataPoints().At(i).Flags().HasFlag(pmetric.MetricDataPointFlagNoRecordedValue) { return false } } - case 
pdata.MetricDataTypeSum: + case pmetric.MetricDataTypeSum: for i := 0; i < m.Sum().DataPoints().Len(); i++ { - if !m.Sum().DataPoints().At(i).Flags().HasFlag(pdata.MetricDataPointFlagNoRecordedValue) { + if !m.Sum().DataPoints().At(i).Flags().HasFlag(pmetric.MetricDataPointFlagNoRecordedValue) { return false } } - case pdata.MetricDataTypeHistogram: + case pmetric.MetricDataTypeHistogram: for i := 0; i < m.Histogram().DataPoints().Len(); i++ { - if !m.Histogram().DataPoints().At(i).Flags().HasFlag(pdata.MetricDataPointFlagNoRecordedValue) { + if !m.Histogram().DataPoints().At(i).Flags().HasFlag(pmetric.MetricDataPointFlagNoRecordedValue) { return false } } - case pdata.MetricDataTypeSummary: + case pmetric.MetricDataTypeSummary: for i := 0; i < m.Summary().DataPoints().Len(); i++ { - if !m.Summary().DataPoints().At(i).Flags().HasFlag(pdata.MetricDataPointFlagNoRecordedValue) { + if !m.Summary().DataPoints().At(i).Flags().HasFlag(pmetric.MetricDataPointFlagNoRecordedValue) { return false } } @@ -263,7 +264,7 @@ func isFirstFailedScrape(metrics []*pdata.Metric) bool { return true } -func assertUp(t *testing.T, expected float64, metrics []*pdata.Metric) { +func assertUp(t *testing.T, expected float64, metrics []*pmetric.Metric) { for _, m := range metrics { if m.Name() == "up" { assert.Equal(t, expected, m.Gauge().DataPoints().At(0).DoubleVal()) // (assumed up will not have multiple datapoints) @@ -273,7 +274,7 @@ func assertUp(t *testing.T, expected float64, metrics []*pdata.Metric) { t.Error("No 'up' metric found") } -func countScrapeMetricsRM(got *pdata.ResourceMetrics) int { +func countScrapeMetricsRM(got *pmetric.ResourceMetrics) int { n := 0 ilms := got.ScopeMetrics() for j := 0; j < ilms.Len(); j++ { @@ -288,7 +289,7 @@ func countScrapeMetricsRM(got *pdata.ResourceMetrics) int { return n } -func countScrapeMetrics(metrics []*pdata.Metric) int { +func countScrapeMetrics(metrics []*pmetric.Metric) int { n := 0 for _, m := range metrics { if isDefaultMetrics(m) { @@ -298,7 +299,7 @@ func countScrapeMetrics(metrics []*pdata.Metric) int { return n } -func isDefaultMetrics(m *pdata.Metric) bool { +func isDefaultMetrics(m *pmetric.Metric) bool { switch m.Name() { case "up", "scrape_duration_seconds", "scrape_samples_scraped", "scrape_samples_post_metric_relabeling", "scrape_series_added": return true @@ -307,10 +308,10 @@ func isDefaultMetrics(m *pdata.Metric) bool { return false } -type metricTypeComparator func(*testing.T, *pdata.Metric) -type numberPointComparator func(*testing.T, *pdata.NumberDataPoint) -type histogramPointComparator func(*testing.T, *pdata.HistogramDataPoint) -type summaryPointComparator func(*testing.T, *pdata.SummaryDataPoint) +type metricTypeComparator func(*testing.T, *pmetric.Metric) +type numberPointComparator func(*testing.T, *pmetric.NumberDataPoint) +type histogramPointComparator func(*testing.T, *pmetric.HistogramDataPoint) +type summaryPointComparator func(*testing.T, *pmetric.SummaryDataPoint) type dataPointExpectation struct { numberPointComparator []numberPointComparator @@ -318,9 +319,9 @@ type dataPointExpectation struct { summaryPointComparator []summaryPointComparator } -type testExpectation func(*testing.T, *pdata.ResourceMetrics) +type testExpectation func(*testing.T, *pmetric.ResourceMetrics) -func doCompare(t *testing.T, name string, want pdata.Map, got *pdata.ResourceMetrics, expectations []testExpectation) { +func doCompare(t *testing.T, name string, want pcommon.Map, got *pmetric.ResourceMetrics, expectations []testExpectation) { t.Run(name, 
func(t *testing.T) { assert.Equal(t, expectedScrapeMetricCount, countScrapeMetricsRM(got)) assert.Equal(t, want.Len(), got.Resource().Attributes().Len()) @@ -339,7 +340,7 @@ func doCompare(t *testing.T, name string, want pdata.Map, got *pdata.ResourceMet func assertMetricPresent(name string, metricTypeExpectations metricTypeComparator, dataPointExpectations []dataPointExpectation) testExpectation { - return func(t *testing.T, rm *pdata.ResourceMetrics) { + return func(t *testing.T, rm *pmetric.ResourceMetrics) { allMetrics := getMetrics(rm) for _, m := range allMetrics { if name != m.Name() { @@ -349,24 +350,24 @@ func assertMetricPresent(name string, metricTypeExpectations metricTypeComparato for i, de := range dataPointExpectations { for _, npc := range de.numberPointComparator { switch m.DataType() { - case pdata.MetricDataTypeGauge: + case pmetric.MetricDataTypeGauge: require.Equal(t, m.Gauge().DataPoints().Len(), len(dataPointExpectations), "Expected number of data-points in Gauge metric does not match to testdata") dataPoint := m.Gauge().DataPoints().At(i) npc(t, &dataPoint) - case pdata.MetricDataTypeSum: + case pmetric.MetricDataTypeSum: require.Equal(t, m.Sum().DataPoints().Len(), len(dataPointExpectations), "Expected number of data-points in Sum metric does not match to testdata") dataPoint := m.Sum().DataPoints().At(i) npc(t, &dataPoint) } } switch m.DataType() { - case pdata.MetricDataTypeHistogram: + case pmetric.MetricDataTypeHistogram: for _, hpc := range de.histogramPointComparator { require.Equal(t, m.Histogram().DataPoints().Len(), len(dataPointExpectations), "Expected number of data-points in Histogram metric does not match to testdata") dataPoint := m.Histogram().DataPoints().At(i) hpc(t, &dataPoint) } - case pdata.MetricDataTypeSummary: + case pmetric.MetricDataTypeSummary: for _, spc := range de.summaryPointComparator { require.Equal(t, m.Summary().DataPoints().Len(), len(dataPointExpectations), "Expected number of data-points in Summary metric does not match to testdata") dataPoint := m.Summary().DataPoints().At(i) @@ -379,7 +380,7 @@ func assertMetricPresent(name string, metricTypeExpectations metricTypeComparato } func assertMetricAbsent(name string) testExpectation { - return func(t *testing.T, rm *pdata.ResourceMetrics) { + return func(t *testing.T, rm *pmetric.ResourceMetrics) { allMetrics := getMetrics(rm) for _, m := range allMetrics { assert.NotEqual(t, name, m.Name(), "Metric is present, but was expected absent") @@ -387,14 +388,14 @@ func assertMetricAbsent(name string) testExpectation { } } -func compareMetricType(typ pdata.MetricDataType) metricTypeComparator { - return func(t *testing.T, metric *pdata.Metric) { +func compareMetricType(typ pmetric.MetricDataType) metricTypeComparator { + return func(t *testing.T, metric *pmetric.Metric) { assert.Equal(t, typ.String(), metric.DataType().String(), "Metric type does not match") } } func compareAttributes(attributes map[string]string) numberPointComparator { - return func(t *testing.T, numberDataPoint *pdata.NumberDataPoint) { + return func(t *testing.T, numberDataPoint *pmetric.NumberDataPoint) { req := assert.Equal(t, len(attributes), numberDataPoint.Attributes().Len(), "Attributes length do not match") if req { for k, v := range attributes { @@ -410,7 +411,7 @@ func compareAttributes(attributes map[string]string) numberPointComparator { } func compareSummaryAttributes(attributes map[string]string) summaryPointComparator { - return func(t *testing.T, summaryDataPoint *pdata.SummaryDataPoint) { + return 
func(t *testing.T, summaryDataPoint *pmetric.SummaryDataPoint) { req := assert.Equal(t, len(attributes), summaryDataPoint.Attributes().Len(), "Summary attributes length do not match") if req { for k, v := range attributes { @@ -426,13 +427,13 @@ func compareSummaryAttributes(attributes map[string]string) summaryPointComparat } func assertAttributesAbsent() numberPointComparator { - return func(t *testing.T, numberDataPoint *pdata.NumberDataPoint) { + return func(t *testing.T, numberDataPoint *pmetric.NumberDataPoint) { assert.Equal(t, 0, numberDataPoint.Attributes().Len(), "Attributes length should be 0") } } func compareHistogramAttributes(attributes map[string]string) histogramPointComparator { - return func(t *testing.T, histogramDataPoint *pdata.HistogramDataPoint) { + return func(t *testing.T, histogramDataPoint *pmetric.HistogramDataPoint) { req := assert.Equal(t, len(attributes), histogramDataPoint.Attributes().Len(), "Histogram attributes length do not match") if req { for k, v := range attributes { @@ -448,77 +449,77 @@ func compareHistogramAttributes(attributes map[string]string) histogramPointComp } func assertNumberPointFlagNoRecordedValue() numberPointComparator { - return func(t *testing.T, numberDataPoint *pdata.NumberDataPoint) { - assert.True(t, numberDataPoint.Flags().HasFlag(pdata.MetricDataPointFlagNoRecordedValue), + return func(t *testing.T, numberDataPoint *pmetric.NumberDataPoint) { + assert.True(t, numberDataPoint.Flags().HasFlag(pmetric.MetricDataPointFlagNoRecordedValue), "Datapoint flag for staleness marker not found as expected") } } func assertHistogramPointFlagNoRecordedValue() histogramPointComparator { - return func(t *testing.T, histogramDataPoint *pdata.HistogramDataPoint) { - assert.True(t, histogramDataPoint.Flags().HasFlag(pdata.MetricDataPointFlagNoRecordedValue), + return func(t *testing.T, histogramDataPoint *pmetric.HistogramDataPoint) { + assert.True(t, histogramDataPoint.Flags().HasFlag(pmetric.MetricDataPointFlagNoRecordedValue), "Datapoint flag for staleness marker not found as expected") } } func assertSummaryPointFlagNoRecordedValue() summaryPointComparator { - return func(t *testing.T, summaryDataPoint *pdata.SummaryDataPoint) { - assert.True(t, summaryDataPoint.Flags().HasFlag(pdata.MetricDataPointFlagNoRecordedValue), + return func(t *testing.T, summaryDataPoint *pmetric.SummaryDataPoint) { + assert.True(t, summaryDataPoint.Flags().HasFlag(pmetric.MetricDataPointFlagNoRecordedValue), "Datapoint flag for staleness marker not found as expected") } } -func compareStartTimestamp(startTimeStamp pdata.Timestamp) numberPointComparator { - return func(t *testing.T, numberDataPoint *pdata.NumberDataPoint) { +func compareStartTimestamp(startTimeStamp pcommon.Timestamp) numberPointComparator { + return func(t *testing.T, numberDataPoint *pmetric.NumberDataPoint) { assert.Equal(t, startTimeStamp.String(), numberDataPoint.StartTimestamp().String(), "Start-Timestamp does not match") } } -func compareTimestamp(timeStamp pdata.Timestamp) numberPointComparator { - return func(t *testing.T, numberDataPoint *pdata.NumberDataPoint) { +func compareTimestamp(timeStamp pcommon.Timestamp) numberPointComparator { + return func(t *testing.T, numberDataPoint *pmetric.NumberDataPoint) { assert.Equal(t, timeStamp.String(), numberDataPoint.Timestamp().String(), "Timestamp does not match") } } -func compareHistogramTimestamp(timeStamp pdata.Timestamp) histogramPointComparator { - return func(t *testing.T, histogramDataPoint *pdata.HistogramDataPoint) { +func 
compareHistogramTimestamp(timeStamp pcommon.Timestamp) histogramPointComparator { + return func(t *testing.T, histogramDataPoint *pmetric.HistogramDataPoint) { assert.Equal(t, timeStamp.String(), histogramDataPoint.Timestamp().String(), "Histogram Timestamp does not match") } } -func compareHistogramStartTimestamp(timeStamp pdata.Timestamp) histogramPointComparator { - return func(t *testing.T, histogramDataPoint *pdata.HistogramDataPoint) { +func compareHistogramStartTimestamp(timeStamp pcommon.Timestamp) histogramPointComparator { + return func(t *testing.T, histogramDataPoint *pmetric.HistogramDataPoint) { assert.Equal(t, timeStamp.String(), histogramDataPoint.StartTimestamp().String(), "Histogram Start-Timestamp does not match") } } -func compareSummaryTimestamp(timeStamp pdata.Timestamp) summaryPointComparator { - return func(t *testing.T, summaryDataPoint *pdata.SummaryDataPoint) { +func compareSummaryTimestamp(timeStamp pcommon.Timestamp) summaryPointComparator { + return func(t *testing.T, summaryDataPoint *pmetric.SummaryDataPoint) { assert.Equal(t, timeStamp.String(), summaryDataPoint.Timestamp().String(), "Summary Timestamp does not match") } } -func compareSummaryStartTimestamp(timeStamp pdata.Timestamp) summaryPointComparator { - return func(t *testing.T, summaryDataPoint *pdata.SummaryDataPoint) { +func compareSummaryStartTimestamp(timeStamp pcommon.Timestamp) summaryPointComparator { + return func(t *testing.T, summaryDataPoint *pmetric.SummaryDataPoint) { assert.Equal(t, timeStamp.String(), summaryDataPoint.StartTimestamp().String(), "Summary Start-Timestamp does not match") } } func compareDoubleValue(doubleVal float64) numberPointComparator { - return func(t *testing.T, numberDataPoint *pdata.NumberDataPoint) { + return func(t *testing.T, numberDataPoint *pmetric.NumberDataPoint) { assert.Equal(t, doubleVal, numberDataPoint.DoubleVal(), "Metric double value does not match") } } func assertNormalNan() numberPointComparator { - return func(t *testing.T, numberDataPoint *pdata.NumberDataPoint) { + return func(t *testing.T, numberDataPoint *pmetric.NumberDataPoint) { assert.True(t, math.Float64bits(numberDataPoint.DoubleVal()) == value.NormalNaN, "Metric double value is not normalNaN as expected") } } func compareHistogram(count uint64, sum float64, buckets []uint64) histogramPointComparator { - return func(t *testing.T, histogramDataPoint *pdata.HistogramDataPoint) { + return func(t *testing.T, histogramDataPoint *pmetric.HistogramDataPoint) { assert.Equal(t, count, histogramDataPoint.Count(), "Histogram count value does not match") assert.Equal(t, sum, histogramDataPoint.Sum(), "Histogram sum value does not match") assert.Equal(t, buckets, histogramDataPoint.BucketCounts(), "Histogram bucket count values do not match") @@ -526,7 +527,7 @@ func compareHistogram(count uint64, sum float64, buckets []uint64) histogramPoin } func compareSummary(count uint64, sum float64, quantiles [][]float64) summaryPointComparator { - return func(t *testing.T, summaryDataPoint *pdata.SummaryDataPoint) { + return func(t *testing.T, summaryDataPoint *pmetric.SummaryDataPoint) { assert.Equal(t, count, summaryDataPoint.Count(), "Summary count value does not match") assert.Equal(t, sum, summaryDataPoint.Sum(), "Summary sum value does not match") req := assert.Equal(t, len(quantiles), summaryDataPoint.QuantileValues().Len()) @@ -610,15 +611,15 @@ func flattenTargets(targets map[string][]*scrape.Target) []*scrape.Target { return flatTargets } -func splitMetricsByTarget(metrics []pdata.Metrics) 
map[string][]*pdata.ResourceMetrics { - pResults := make(map[string][]*pdata.ResourceMetrics) +func splitMetricsByTarget(metrics []pmetric.Metrics) map[string][]*pmetric.ResourceMetrics { + pResults := make(map[string][]*pmetric.ResourceMetrics) for _, md := range metrics { rms := md.ResourceMetrics() for i := 0; i < rms.Len(); i++ { name, _ := rms.At(i).Resource().Attributes().Get("service.name") pResult, ok := pResults[name.AsString()] if !ok { - pResult = make([]*pdata.ResourceMetrics, 0) + pResult = make([]*pmetric.ResourceMetrics, 0) } rm := rms.At(i) pResults[name.AsString()] = append(pResult, &rm) @@ -627,21 +628,21 @@ func splitMetricsByTarget(metrics []pdata.Metrics) map[string][]*pdata.ResourceM return pResults } -func getTS(ms pdata.MetricSlice) pdata.Timestamp { +func getTS(ms pmetric.MetricSlice) pcommon.Timestamp { if ms.Len() == 0 { return 0 } m := ms.At(0) switch m.DataType() { - case pdata.MetricDataTypeGauge: + case pmetric.MetricDataTypeGauge: return m.Gauge().DataPoints().At(0).Timestamp() - case pdata.MetricDataTypeSum: + case pmetric.MetricDataTypeSum: return m.Sum().DataPoints().At(0).Timestamp() - case pdata.MetricDataTypeHistogram: + case pmetric.MetricDataTypeHistogram: return m.Histogram().DataPoints().At(0).Timestamp() - case pdata.MetricDataTypeSummary: + case pmetric.MetricDataTypeSummary: return m.Summary().DataPoints().At(0).Timestamp() - case pdata.MetricDataTypeExponentialHistogram: + case pmetric.MetricDataTypeExponentialHistogram: return m.ExponentialHistogram().DataPoints().At(0).Timestamp() } return 0 diff --git a/receiver/prometheusreceiver/metrics_receiver_honor_timestamp_test.go b/receiver/prometheusreceiver/metrics_receiver_honor_timestamp_test.go index ad8ec7b77a9b..cb313a4f1ff9 100644 --- a/receiver/prometheusreceiver/metrics_receiver_honor_timestamp_test.go +++ b/receiver/prometheusreceiver/metrics_receiver_honor_timestamp_test.go @@ -22,7 +22,8 @@ import ( promcfg "github.com/prometheus/prometheus/config" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" ) var ( @@ -208,7 +209,7 @@ func setMetricsTimestamp() { }) } -func verifyHonorTimeStampsTrue(t *testing.T, td *testData, resourceMetrics []*pdata.ResourceMetrics) { +func verifyHonorTimeStampsTrue(t *testing.T, td *testData, resourceMetrics []*pmetric.ResourceMetrics) { verifyNumValidScrapeResults(t, td, resourceMetrics) m1 := resourceMetrics[0] // m1 has 4 metrics + 5 internal scraper metrics @@ -217,53 +218,53 @@ func verifyHonorTimeStampsTrue(t *testing.T, td *testData, resourceMetrics []*pd wantAttributes := td.attributes e1 := []testExpectation{ assertMetricPresent("go_threads", - compareMetricType(pdata.MetricDataTypeGauge), + compareMetricType(pmetric.MetricDataTypeGauge), []dataPointExpectation{ { numberPointComparator: []numberPointComparator{ - compareTimestamp(pdata.NewTimestampFromTime(time.UnixMilli(ts1))), + compareTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(ts1))), compareDoubleValue(19), }, }, }), assertMetricPresent("http_requests_total", - compareMetricType(pdata.MetricDataTypeSum), + compareMetricType(pmetric.MetricDataTypeSum), []dataPointExpectation{ { numberPointComparator: []numberPointComparator{ - compareStartTimestamp(pdata.NewTimestampFromTime(time.UnixMilli(ts2))), - compareTimestamp(pdata.NewTimestampFromTime(time.UnixMilli(ts2))), + compareStartTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(ts2))), + 
compareTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(ts2))), compareDoubleValue(100), compareAttributes(map[string]string{"method": "post", "code": "200"}), }, }, { numberPointComparator: []numberPointComparator{ - compareStartTimestamp(pdata.NewTimestampFromTime(time.UnixMilli(ts3))), - compareTimestamp(pdata.NewTimestampFromTime(time.UnixMilli(ts3))), + compareStartTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(ts3))), + compareTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(ts3))), compareDoubleValue(5), compareAttributes(map[string]string{"method": "post", "code": "400"}), }, }, }), assertMetricPresent("http_request_duration_seconds", - compareMetricType(pdata.MetricDataTypeHistogram), + compareMetricType(pmetric.MetricDataTypeHistogram), []dataPointExpectation{ { histogramPointComparator: []histogramPointComparator{ - compareHistogramStartTimestamp(pdata.NewTimestampFromTime(time.UnixMilli(ts4))), - compareHistogramTimestamp(pdata.NewTimestampFromTime(time.UnixMilli(ts4))), + compareHistogramStartTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(ts4))), + compareHistogramTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(ts4))), compareHistogram(2500, 5000, []uint64{1000, 500, 500, 500}), }, }, }), assertMetricPresent("rpc_duration_seconds", - compareMetricType(pdata.MetricDataTypeSummary), + compareMetricType(pmetric.MetricDataTypeSummary), []dataPointExpectation{ { summaryPointComparator: []summaryPointComparator{ - compareSummaryStartTimestamp(pdata.NewTimestampFromTime(time.UnixMilli(ts5))), - compareSummaryTimestamp(pdata.NewTimestampFromTime(time.UnixMilli(ts5))), + compareSummaryStartTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(ts5))), + compareSummaryTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(ts5))), compareSummary(1000, 5000, [][]float64{{0.01, 1}, {0.9, 5}, {0.99, 8}}), }, }, @@ -277,53 +278,53 @@ func verifyHonorTimeStampsTrue(t *testing.T, td *testData, resourceMetrics []*pd e2 := []testExpectation{ assertMetricPresent("go_threads", - compareMetricType(pdata.MetricDataTypeGauge), + compareMetricType(pmetric.MetricDataTypeGauge), []dataPointExpectation{ { numberPointComparator: []numberPointComparator{ - compareTimestamp(pdata.NewTimestampFromTime(time.UnixMilli(ts6))), + compareTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(ts6))), compareDoubleValue(18), }, }, }), assertMetricPresent("http_requests_total", - compareMetricType(pdata.MetricDataTypeSum), + compareMetricType(pmetric.MetricDataTypeSum), []dataPointExpectation{ { numberPointComparator: []numberPointComparator{ - compareStartTimestamp(pdata.NewTimestampFromTime(time.UnixMilli(ts7))), - compareTimestamp(pdata.NewTimestampFromTime(time.UnixMilli(ts7))), + compareStartTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(ts7))), + compareTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(ts7))), compareDoubleValue(99), compareAttributes(map[string]string{"method": "post", "code": "200"}), }, }, { numberPointComparator: []numberPointComparator{ - compareStartTimestamp(pdata.NewTimestampFromTime(time.UnixMilli(ts8))), - compareTimestamp(pdata.NewTimestampFromTime(time.UnixMilli(ts8))), + compareStartTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(ts8))), + compareTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(ts8))), compareDoubleValue(3), compareAttributes(map[string]string{"method": "post", "code": "400"}), }, }, }), assertMetricPresent("http_request_duration_seconds", - compareMetricType(pdata.MetricDataTypeHistogram), + 
compareMetricType(pmetric.MetricDataTypeHistogram), []dataPointExpectation{ { histogramPointComparator: []histogramPointComparator{ - compareHistogramStartTimestamp(pdata.NewTimestampFromTime(time.UnixMilli(ts9))), - compareHistogramTimestamp(pdata.NewTimestampFromTime(time.UnixMilli(ts9))), + compareHistogramStartTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(ts9))), + compareHistogramTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(ts9))), compareHistogram(2400, 4950, []uint64{900, 500, 500, 500}), }, }, }), assertMetricPresent("rpc_duration_seconds", - compareMetricType(pdata.MetricDataTypeSummary), + compareMetricType(pmetric.MetricDataTypeSummary), []dataPointExpectation{ { summaryPointComparator: []summaryPointComparator{ - compareSummaryStartTimestamp(pdata.NewTimestampFromTime(time.UnixMilli(ts10))), - compareSummaryTimestamp(pdata.NewTimestampFromTime(time.UnixMilli(ts10))), + compareSummaryStartTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(ts10))), + compareSummaryTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(ts10))), compareSummary(900, 4980, [][]float64{{0.01, 1}, {0.9, 6}, {0.99, 8}}), }, }, @@ -337,53 +338,53 @@ func verifyHonorTimeStampsTrue(t *testing.T, td *testData, resourceMetrics []*pd e3 := []testExpectation{ assertMetricPresent("go_threads", - compareMetricType(pdata.MetricDataTypeGauge), + compareMetricType(pmetric.MetricDataTypeGauge), []dataPointExpectation{ { numberPointComparator: []numberPointComparator{ - compareTimestamp(pdata.NewTimestampFromTime(time.UnixMilli(ts11))), + compareTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(ts11))), compareDoubleValue(19), }, }, }), assertMetricPresent("http_requests_total", - compareMetricType(pdata.MetricDataTypeSum), + compareMetricType(pmetric.MetricDataTypeSum), []dataPointExpectation{ { numberPointComparator: []numberPointComparator{ - compareStartTimestamp(pdata.NewTimestampFromTime(time.UnixMilli(ts7))), - compareTimestamp(pdata.NewTimestampFromTime(time.UnixMilli(ts12))), + compareStartTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(ts7))), + compareTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(ts12))), compareDoubleValue(100), compareAttributes(map[string]string{"method": "post", "code": "200"}), }, }, { numberPointComparator: []numberPointComparator{ - compareStartTimestamp(pdata.NewTimestampFromTime(time.UnixMilli(ts8))), - compareTimestamp(pdata.NewTimestampFromTime(time.UnixMilli(ts13))), + compareStartTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(ts8))), + compareTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(ts13))), compareDoubleValue(5), compareAttributes(map[string]string{"method": "post", "code": "400"}), }, }, }), assertMetricPresent("http_request_duration_seconds", - compareMetricType(pdata.MetricDataTypeHistogram), + compareMetricType(pmetric.MetricDataTypeHistogram), []dataPointExpectation{ { histogramPointComparator: []histogramPointComparator{ - compareHistogramStartTimestamp(pdata.NewTimestampFromTime(time.UnixMilli(ts9))), - compareHistogramTimestamp(pdata.NewTimestampFromTime(time.UnixMilli(ts14))), + compareHistogramStartTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(ts9))), + compareHistogramTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(ts14))), compareHistogram(2500, 5000, []uint64{1000, 500, 500, 500}), }, }, }), assertMetricPresent("rpc_duration_seconds", - compareMetricType(pdata.MetricDataTypeSummary), + compareMetricType(pmetric.MetricDataTypeSummary), []dataPointExpectation{ { summaryPointComparator: 
[]summaryPointComparator{ - compareSummaryStartTimestamp(pdata.NewTimestampFromTime(time.UnixMilli(ts10))), - compareSummaryTimestamp(pdata.NewTimestampFromTime(time.UnixMilli(ts15))), + compareSummaryStartTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(ts10))), + compareSummaryTimestamp(pcommon.NewTimestampFromTime(time.UnixMilli(ts15))), compareSummary(1000, 5000, [][]float64{{0.01, 1}, {0.9, 5}, {0.99, 8}}), }, }, @@ -392,7 +393,7 @@ func verifyHonorTimeStampsTrue(t *testing.T, td *testData, resourceMetrics []*pd doCompare(t, "scrape-honorTimestamp-3", wantAttributes, m3, e3) } -func verifyHonorTimeStampsFalse(t *testing.T, td *testData, resourceMetrics []*pdata.ResourceMetrics) { +func verifyHonorTimeStampsFalse(t *testing.T, td *testData, resourceMetrics []*pmetric.ResourceMetrics) { verifyNumValidScrapeResults(t, td, resourceMetrics) m1 := resourceMetrics[0] @@ -405,7 +406,7 @@ func verifyHonorTimeStampsFalse(t *testing.T, td *testData, resourceMetrics []*p ts1 := getTS(metrics1) e1 := []testExpectation{ assertMetricPresent("go_threads", - compareMetricType(pdata.MetricDataTypeGauge), + compareMetricType(pmetric.MetricDataTypeGauge), []dataPointExpectation{ { numberPointComparator: []numberPointComparator{ @@ -415,7 +416,7 @@ func verifyHonorTimeStampsFalse(t *testing.T, td *testData, resourceMetrics []*p }, }), assertMetricPresent("http_requests_total", - compareMetricType(pdata.MetricDataTypeSum), + compareMetricType(pmetric.MetricDataTypeSum), []dataPointExpectation{ { numberPointComparator: []numberPointComparator{ @@ -435,7 +436,7 @@ func verifyHonorTimeStampsFalse(t *testing.T, td *testData, resourceMetrics []*p }, }), assertMetricPresent("http_request_duration_seconds", - compareMetricType(pdata.MetricDataTypeHistogram), + compareMetricType(pmetric.MetricDataTypeHistogram), []dataPointExpectation{ { histogramPointComparator: []histogramPointComparator{ @@ -446,7 +447,7 @@ func verifyHonorTimeStampsFalse(t *testing.T, td *testData, resourceMetrics []*p }, }), assertMetricPresent("rpc_duration_seconds", - compareMetricType(pdata.MetricDataTypeSummary), + compareMetricType(pmetric.MetricDataTypeSummary), []dataPointExpectation{ { summaryPointComparator: []summaryPointComparator{ @@ -467,7 +468,7 @@ func verifyHonorTimeStampsFalse(t *testing.T, td *testData, resourceMetrics []*p ts2 := getTS(metricsScrape2) e2 := []testExpectation{ assertMetricPresent("go_threads", - compareMetricType(pdata.MetricDataTypeGauge), + compareMetricType(pmetric.MetricDataTypeGauge), []dataPointExpectation{ { numberPointComparator: []numberPointComparator{ @@ -477,7 +478,7 @@ func verifyHonorTimeStampsFalse(t *testing.T, td *testData, resourceMetrics []*p }, }), assertMetricPresent("http_requests_total", - compareMetricType(pdata.MetricDataTypeSum), + compareMetricType(pmetric.MetricDataTypeSum), []dataPointExpectation{ { numberPointComparator: []numberPointComparator{ @@ -497,7 +498,7 @@ func verifyHonorTimeStampsFalse(t *testing.T, td *testData, resourceMetrics []*p }, }), assertMetricPresent("http_request_duration_seconds", - compareMetricType(pdata.MetricDataTypeHistogram), + compareMetricType(pmetric.MetricDataTypeHistogram), []dataPointExpectation{ { histogramPointComparator: []histogramPointComparator{ @@ -508,7 +509,7 @@ func verifyHonorTimeStampsFalse(t *testing.T, td *testData, resourceMetrics []*p }, }), assertMetricPresent("rpc_duration_seconds", - compareMetricType(pdata.MetricDataTypeSummary), + compareMetricType(pmetric.MetricDataTypeSummary), []dataPointExpectation{ { 
summaryPointComparator: []summaryPointComparator{ diff --git a/receiver/prometheusreceiver/metrics_receiver_labels_test.go b/receiver/prometheusreceiver/metrics_receiver_labels_test.go index b72e1abd66cc..5cc783a78f82 100644 --- a/receiver/prometheusreceiver/metrics_receiver_labels_test.go +++ b/receiver/prometheusreceiver/metrics_receiver_labels_test.go @@ -21,7 +21,8 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/relabel" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" ) const targetExternalLabels = ` @@ -45,7 +46,7 @@ func TestExternalLabels(t *testing.T) { }) } -func verifyExternalLabels(t *testing.T, td *testData, rms []*pdata.ResourceMetrics) { +func verifyExternalLabels(t *testing.T, td *testData, rms []*pmetric.ResourceMetrics) { verifyNumValidScrapeResults(t, td, rms) require.Greater(t, len(rms), 0, "At least one resource metric should be present") @@ -54,7 +55,7 @@ func verifyExternalLabels(t *testing.T, td *testData, rms []*pdata.ResourceMetri ts1 := metrics1.At(0).Gauge().DataPoints().At(0).Timestamp() doCompare(t, "scrape-externalLabels", wantAttributes, rms[0], []testExpectation{ assertMetricPresent("go_threads", - compareMetricType(pdata.MetricDataTypeGauge), + compareMetricType(pmetric.MetricDataTypeGauge), []dataPointExpectation{ { numberPointComparator: []numberPointComparator{ @@ -73,7 +74,7 @@ const targetLabelLimit1 = ` test_gauge0{label1="value1",label2="value2"} 10 ` -func verifyLabelLimitTarget1(t *testing.T, td *testData, rms []*pdata.ResourceMetrics) { +func verifyLabelLimitTarget1(t *testing.T, td *testData, rms []*pmetric.ResourceMetrics) { //each sample in the scraped metrics is within the configured label_limit, scrape should be successful verifyNumValidScrapeResults(t, td, rms) require.Greater(t, len(rms), 0, "At least one resource metric should be present") @@ -84,7 +85,7 @@ func verifyLabelLimitTarget1(t *testing.T, td *testData, rms []*pdata.ResourceMe doCompare(t, "scrape-labelLimit", want, rms[0], []testExpectation{ assertMetricPresent("test_gauge0", - compareMetricType(pdata.MetricDataTypeGauge), + compareMetricType(pmetric.MetricDataTypeGauge), []dataPointExpectation{ { numberPointComparator: []numberPointComparator{ @@ -104,7 +105,7 @@ const targetLabelLimit2 = ` test_gauge0{label1="value1",label2="value2",label3="value3"} 10 ` -func verifyFailedScrape(t *testing.T, _ *testData, rms []*pdata.ResourceMetrics) { +func verifyFailedScrape(t *testing.T, _ *testData, rms []*pmetric.ResourceMetrics) { //Scrape should be unsuccessful since limit is exceeded in target2 for _, rm := range rms { metrics := getMetrics(rm) @@ -165,7 +166,7 @@ test_summary0_sum{label1="value1",label2="value2"} 5000 test_summary0_count{label1="value1",label2="value2"} 1000 ` -func verifyLabelConfigTarget1(t *testing.T, td *testData, rms []*pdata.ResourceMetrics) { +func verifyLabelConfigTarget1(t *testing.T, td *testData, rms []*pmetric.ResourceMetrics) { verifyNumValidScrapeResults(t, td, rms) require.Greater(t, len(rms), 0, "At least one resource metric should be present") @@ -175,7 +176,7 @@ func verifyLabelConfigTarget1(t *testing.T, td *testData, rms []*pdata.ResourceM e1 := []testExpectation{ assertMetricPresent("test_counter0", - compareMetricType(pdata.MetricDataTypeSum), + compareMetricType(pmetric.MetricDataTypeSum), []dataPointExpectation{ { numberPointComparator: 
[]numberPointComparator{ @@ -187,7 +188,7 @@ func verifyLabelConfigTarget1(t *testing.T, td *testData, rms []*pdata.ResourceM }, }), assertMetricPresent("test_gauge0", - compareMetricType(pdata.MetricDataTypeGauge), + compareMetricType(pmetric.MetricDataTypeGauge), []dataPointExpectation{ { numberPointComparator: []numberPointComparator{ @@ -198,7 +199,7 @@ func verifyLabelConfigTarget1(t *testing.T, td *testData, rms []*pdata.ResourceM }, }), assertMetricPresent("test_histogram0", - compareMetricType(pdata.MetricDataTypeHistogram), + compareMetricType(pmetric.MetricDataTypeHistogram), []dataPointExpectation{ { histogramPointComparator: []histogramPointComparator{ @@ -210,7 +211,7 @@ func verifyLabelConfigTarget1(t *testing.T, td *testData, rms []*pdata.ResourceM }, }), assertMetricPresent("test_summary0", - compareMetricType(pdata.MetricDataTypeSummary), + compareMetricType(pmetric.MetricDataTypeSummary), []dataPointExpectation{ { summaryPointComparator: []summaryPointComparator{ @@ -325,7 +326,7 @@ test_summary0_sum{id="1",testLabel=""} 5000 test_summary0_count{id="1",testLabel=""} 1000 ` -func verifyEmptyLabelValuesTarget1(t *testing.T, td *testData, rms []*pdata.ResourceMetrics) { +func verifyEmptyLabelValuesTarget1(t *testing.T, td *testData, rms []*pmetric.ResourceMetrics) { require.Greater(t, len(rms), 0, "At least one resource metric should be present") want := td.attributes @@ -334,7 +335,7 @@ func verifyEmptyLabelValuesTarget1(t *testing.T, td *testData, rms []*pdata.Reso e1 := []testExpectation{ assertMetricPresent("test_gauge0", - compareMetricType(pdata.MetricDataTypeGauge), + compareMetricType(pmetric.MetricDataTypeGauge), []dataPointExpectation{ { numberPointComparator: []numberPointComparator{ @@ -345,7 +346,7 @@ func verifyEmptyLabelValuesTarget1(t *testing.T, td *testData, rms []*pdata.Reso }, }), assertMetricPresent("test_counter0", - compareMetricType(pdata.MetricDataTypeSum), + compareMetricType(pmetric.MetricDataTypeSum), []dataPointExpectation{ { numberPointComparator: []numberPointComparator{ @@ -356,7 +357,7 @@ func verifyEmptyLabelValuesTarget1(t *testing.T, td *testData, rms []*pdata.Reso }, }), assertMetricPresent("test_histogram0", - compareMetricType(pdata.MetricDataTypeHistogram), + compareMetricType(pmetric.MetricDataTypeHistogram), []dataPointExpectation{ { histogramPointComparator: []histogramPointComparator{ @@ -368,7 +369,7 @@ func verifyEmptyLabelValuesTarget1(t *testing.T, td *testData, rms []*pdata.Reso }, }), assertMetricPresent("test_summary0", - compareMetricType(pdata.MetricDataTypeSummary), + compareMetricType(pmetric.MetricDataTypeSummary), []dataPointExpectation{ { summaryPointComparator: []summaryPointComparator{ @@ -396,7 +397,7 @@ test_counter0{id="1",testLabel=""} 100 test_counter0{id="2",testLabel="foobar"} 110 ` -func verifyEmptyLabelValuesTarget2(t *testing.T, td *testData, rms []*pdata.ResourceMetrics) { +func verifyEmptyLabelValuesTarget2(t *testing.T, td *testData, rms []*pmetric.ResourceMetrics) { require.Greater(t, len(rms), 0, "At least one resource metric should be present") want := td.attributes @@ -405,7 +406,7 @@ func verifyEmptyLabelValuesTarget2(t *testing.T, td *testData, rms []*pdata.Reso e1 := []testExpectation{ assertMetricPresent("test_gauge0", - compareMetricType(pdata.MetricDataTypeGauge), + compareMetricType(pmetric.MetricDataTypeGauge), []dataPointExpectation{ { numberPointComparator: []numberPointComparator{ @@ -423,7 +424,7 @@ func verifyEmptyLabelValuesTarget2(t *testing.T, td *testData, rms []*pdata.Reso }, }), 
assertMetricPresent("test_counter0", - compareMetricType(pdata.MetricDataTypeSum), + compareMetricType(pmetric.MetricDataTypeSum), []dataPointExpectation{ { numberPointComparator: []numberPointComparator{ @@ -470,7 +471,7 @@ const honorLabelsTarget = ` test_gauge0{instance="hostname:8080",job="honor_labels_test",testLabel="value1"} 1 ` -func verifyHonorLabelsFalse(t *testing.T, td *testData, rms []*pdata.ResourceMetrics) { +func verifyHonorLabelsFalse(t *testing.T, td *testData, rms []*pmetric.ResourceMetrics) { want := td.attributes require.Greater(t, len(rms), 0, "At least one resource metric should be present") @@ -479,7 +480,7 @@ func verifyHonorLabelsFalse(t *testing.T, td *testData, rms []*pdata.ResourceMet doCompare(t, "honor_labels_false", want, rms[0], []testExpectation{ assertMetricPresent("test_gauge0", - compareMetricType(pdata.MetricDataTypeGauge), + compareMetricType(pmetric.MetricDataTypeGauge), []dataPointExpectation{ { numberPointComparator: []numberPointComparator{ @@ -504,7 +505,7 @@ test_gauge0 19 test_counter0 100 ` -func verifyEmptyLabelsTarget1(t *testing.T, td *testData, rms []*pdata.ResourceMetrics) { +func verifyEmptyLabelsTarget1(t *testing.T, td *testData, rms []*pmetric.ResourceMetrics) { require.Greater(t, len(rms), 0, "At least one resource metric should be present") want := td.attributes @@ -514,7 +515,7 @@ func verifyEmptyLabelsTarget1(t *testing.T, td *testData, rms []*pdata.ResourceM e1 := []testExpectation{ assertMetricPresent( "test_gauge0", - compareMetricType(pdata.MetricDataTypeGauge), + compareMetricType(pmetric.MetricDataTypeGauge), []dataPointExpectation{ { numberPointComparator: []numberPointComparator{ @@ -527,7 +528,7 @@ func verifyEmptyLabelsTarget1(t *testing.T, td *testData, rms []*pdata.ResourceM ), assertMetricPresent( "test_counter0", - compareMetricType(pdata.MetricDataTypeSum), + compareMetricType(pmetric.MetricDataTypeSum), []dataPointExpectation{ { numberPointComparator: []numberPointComparator{ @@ -569,7 +570,7 @@ func TestHonorLabelsFalseConfig(t *testing.T) { testComponent(t, targets, false, "") } -func verifyHonorLabelsTrue(t *testing.T, td *testData, rms []*pdata.ResourceMetrics) { +func verifyHonorLabelsTrue(t *testing.T, td *testData, rms []*pmetric.ResourceMetrics) { //Test for honor_labels: true is skipped. 
Currently, the Prometheus receiver is unable to support this config //See: https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/5757 //TODO: Enable this test once issue 5757 is resolved @@ -579,17 +580,17 @@ func verifyHonorLabelsTrue(t *testing.T, td *testData, rms []*pdata.ResourceMetr //job and instance label values should be honored from honorLabelsTarget expectedAttributes := td.attributes - expectedAttributes.Update("job", pdata.NewValueString("honor_labels_test")) - expectedAttributes.Update("instance", pdata.NewValueString("hostname:8080")) - expectedAttributes.Update("host.name", pdata.NewValueString("hostname")) - expectedAttributes.Update("port", pdata.NewValueString("8080")) + expectedAttributes.Update("job", pcommon.NewValueString("honor_labels_test")) + expectedAttributes.Update("instance", pcommon.NewValueString("hostname:8080")) + expectedAttributes.Update("host.name", pcommon.NewValueString("hostname")) + expectedAttributes.Update("port", pcommon.NewValueString("8080")) metrics1 := rms[0].ScopeMetrics().At(0).Metrics() ts1 := metrics1.At(0).Gauge().DataPoints().At(0).Timestamp() doCompare(t, "honor_labels_true", expectedAttributes, rms[0], []testExpectation{ assertMetricPresent("test_gauge0", - compareMetricType(pdata.MetricDataTypeGauge), + compareMetricType(pmetric.MetricDataTypeGauge), []dataPointExpectation{ { numberPointComparator: []numberPointComparator{ @@ -661,21 +662,21 @@ func TestRelabelJobInstance(t *testing.T) { }) } -func verifyRelabelJobInstance(t *testing.T, td *testData, rms []*pdata.ResourceMetrics) { +func verifyRelabelJobInstance(t *testing.T, td *testData, rms []*pmetric.ResourceMetrics) { verifyNumValidScrapeResults(t, td, rms) require.Greater(t, len(rms), 0, "At least one resource metric should be present") wantAttributes := td.attributes - wantAttributes.Update("service.name", pdata.NewValueString("not-target1")) - wantAttributes.Update("service.instance.id", pdata.NewValueString("relabeled-instance")) - wantAttributes.Update("net.host.port", pdata.NewValueString("")) - wantAttributes.Insert("net.host.name", pdata.NewValueString("relabeled-instance")) + wantAttributes.Update("service.name", pcommon.NewValueString("not-target1")) + wantAttributes.Update("service.instance.id", pcommon.NewValueString("relabeled-instance")) + wantAttributes.Update("net.host.port", pcommon.NewValueString("")) + wantAttributes.Insert("net.host.name", pcommon.NewValueString("relabeled-instance")) metrics1 := rms[0].InstrumentationLibraryMetrics().At(0).Metrics() ts1 := metrics1.At(0).Gauge().DataPoints().At(0).Timestamp() doCompare(t, "relabel-job-instance", wantAttributes, rms[0], []testExpectation{ assertMetricPresent("jvm_memory_bytes_used", - compareMetricType(pdata.MetricDataTypeGauge), + compareMetricType(pmetric.MetricDataTypeGauge), []dataPointExpectation{ { numberPointComparator: []numberPointComparator{ diff --git a/receiver/prometheusreceiver/metrics_receiver_non_numerical_test.go b/receiver/prometheusreceiver/metrics_receiver_non_numerical_test.go index b43bc8bd5b68..f3e5d4b25133 100644 --- a/receiver/prometheusreceiver/metrics_receiver_non_numerical_test.go +++ b/receiver/prometheusreceiver/metrics_receiver_non_numerical_test.go @@ -21,7 +21,8 @@ import ( "github.com/prometheus/prometheus/model/value" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" ) var staleNaNsPage1 = ` @@ -83,7 +84,7 
@@ func TestStaleNaNs(t *testing.T) { testComponent(t, targets, false, "") } -func verifyStaleNaNs(t *testing.T, td *testData, resourceMetrics []*pdata.ResourceMetrics) { +func verifyStaleNaNs(t *testing.T, td *testData, resourceMetrics []*pmetric.ResourceMetrics) { verifyNumTotalScrapeResults(t, td, resourceMetrics) metrics1 := resourceMetrics[0].ScopeMetrics().At(0).Metrics() ts1 := getTS(metrics1) @@ -96,7 +97,7 @@ func verifyStaleNaNs(t *testing.T, td *testData, resourceMetrics []*pdata.Resour } } -func verifyStaleNaNPage1SuccessfulScrape(t *testing.T, td *testData, resourceMetric *pdata.ResourceMetrics, startTimestamp *pdata.Timestamp, iteration int) { +func verifyStaleNaNPage1SuccessfulScrape(t *testing.T, td *testData, resourceMetric *pmetric.ResourceMetrics, startTimestamp *pcommon.Timestamp, iteration int) { // m1 has 4 metrics + 5 internal scraper metrics assert.Equal(t, 9, metricsCount(resourceMetric)) wantAttributes := td.attributes // should want attribute be part of complete target or each scrape? @@ -104,7 +105,7 @@ func verifyStaleNaNPage1SuccessfulScrape(t *testing.T, td *testData, resourceMet ts1 := getTS(metrics1) e1 := []testExpectation{ assertMetricPresent("go_threads", - compareMetricType(pdata.MetricDataTypeGauge), + compareMetricType(pmetric.MetricDataTypeGauge), []dataPointExpectation{ { numberPointComparator: []numberPointComparator{ @@ -114,7 +115,7 @@ func verifyStaleNaNPage1SuccessfulScrape(t *testing.T, td *testData, resourceMet }, }), assertMetricPresent("http_requests_total", - compareMetricType(pdata.MetricDataTypeSum), + compareMetricType(pmetric.MetricDataTypeSum), []dataPointExpectation{ { numberPointComparator: []numberPointComparator{ @@ -134,7 +135,7 @@ func verifyStaleNaNPage1SuccessfulScrape(t *testing.T, td *testData, resourceMet }, }), assertMetricPresent("http_request_duration_seconds", - compareMetricType(pdata.MetricDataTypeHistogram), + compareMetricType(pmetric.MetricDataTypeHistogram), []dataPointExpectation{ { histogramPointComparator: []histogramPointComparator{ @@ -147,7 +148,7 @@ func verifyStaleNaNPage1SuccessfulScrape(t *testing.T, td *testData, resourceMet }, }), assertMetricPresent("rpc_duration_seconds", - compareMetricType(pdata.MetricDataTypeSummary), + compareMetricType(pmetric.MetricDataTypeSummary), []dataPointExpectation{ { summaryPointComparator: []summaryPointComparator{ @@ -163,7 +164,7 @@ func verifyStaleNaNPage1SuccessfulScrape(t *testing.T, td *testData, resourceMet doCompare(t, fmt.Sprintf("validScrape-scrape-%d", iteration), wantAttributes, resourceMetric, e1) } -func verifyStaleNanPage1FirstFailedScrape(t *testing.T, td *testData, resourceMetric *pdata.ResourceMetrics, startTimestamp *pdata.Timestamp, iteration int) { +func verifyStaleNanPage1FirstFailedScrape(t *testing.T, td *testData, resourceMetric *pmetric.ResourceMetrics, startTimestamp *pcommon.Timestamp, iteration int) { // m1 has 4 metrics + 5 internal scraper metrics assert.Equal(t, 9, metricsCount(resourceMetric)) wantAttributes := td.attributes @@ -174,7 +175,7 @@ func verifyStaleNanPage1FirstFailedScrape(t *testing.T, td *testData, resourceMe ts1 := getTS(metrics1) e1 := []testExpectation{ assertMetricPresent("go_threads", - compareMetricType(pdata.MetricDataTypeGauge), + compareMetricType(pmetric.MetricDataTypeGauge), []dataPointExpectation{ { numberPointComparator: []numberPointComparator{ @@ -184,7 +185,7 @@ func verifyStaleNanPage1FirstFailedScrape(t *testing.T, td *testData, resourceMe }, }), assertMetricPresent("http_requests_total", - 
compareMetricType(pdata.MetricDataTypeSum), + compareMetricType(pmetric.MetricDataTypeSum), []dataPointExpectation{ { numberPointComparator: []numberPointComparator{ @@ -202,7 +203,7 @@ func verifyStaleNanPage1FirstFailedScrape(t *testing.T, td *testData, resourceMe }, }), assertMetricPresent("http_request_duration_seconds", - compareMetricType(pdata.MetricDataTypeHistogram), + compareMetricType(pmetric.MetricDataTypeHistogram), []dataPointExpectation{ { histogramPointComparator: []histogramPointComparator{ @@ -213,7 +214,7 @@ func verifyStaleNanPage1FirstFailedScrape(t *testing.T, td *testData, resourceMe }, }), assertMetricPresent("rpc_duration_seconds", - compareMetricType(pdata.MetricDataTypeSummary), + compareMetricType(pmetric.MetricDataTypeSummary), []dataPointExpectation{ { summaryPointComparator: []summaryPointComparator{ @@ -261,7 +262,7 @@ func TestNormalNaNs(t *testing.T) { testComponent(t, targets, false, "") } -func verifyNormalNaNs(t *testing.T, td *testData, resourceMetrics []*pdata.ResourceMetrics) { +func verifyNormalNaNs(t *testing.T, td *testData, resourceMetrics []*pmetric.ResourceMetrics) { verifyNumValidScrapeResults(t, td, resourceMetrics) m1 := resourceMetrics[0] @@ -274,7 +275,7 @@ func verifyNormalNaNs(t *testing.T, td *testData, resourceMetrics []*pdata.Resou ts1 := getTS(metrics1) e1 := []testExpectation{ assertMetricPresent("go_threads", - compareMetricType(pdata.MetricDataTypeGauge), + compareMetricType(pmetric.MetricDataTypeGauge), []dataPointExpectation{ { numberPointComparator: []numberPointComparator{ @@ -284,7 +285,7 @@ func verifyNormalNaNs(t *testing.T, td *testData, resourceMetrics []*pdata.Resou }, }), assertMetricPresent("redis_connected_clients", - compareMetricType(pdata.MetricDataTypeGauge), + compareMetricType(pmetric.MetricDataTypeGauge), []dataPointExpectation{ { numberPointComparator: []numberPointComparator{ @@ -295,7 +296,7 @@ func verifyNormalNaNs(t *testing.T, td *testData, resourceMetrics []*pdata.Resou }, }), assertMetricPresent("rpc_duration_seconds", - compareMetricType(pdata.MetricDataTypeSummary), + compareMetricType(pmetric.MetricDataTypeSummary), []dataPointExpectation{ { summaryPointComparator: []summaryPointComparator{ @@ -345,7 +346,7 @@ func TestInfValues(t *testing.T) { testComponent(t, targets, false, "") } -func verifyInfValues(t *testing.T, td *testData, resourceMetrics []*pdata.ResourceMetrics) { +func verifyInfValues(t *testing.T, td *testData, resourceMetrics []*pmetric.ResourceMetrics) { verifyNumValidScrapeResults(t, td, resourceMetrics) m1 := resourceMetrics[0] @@ -358,7 +359,7 @@ func verifyInfValues(t *testing.T, td *testData, resourceMetrics []*pdata.Resour ts1 := getTS(metrics1) e1 := []testExpectation{ assertMetricPresent("go_threads", - compareMetricType(pdata.MetricDataTypeGauge), + compareMetricType(pmetric.MetricDataTypeGauge), []dataPointExpectation{ { numberPointComparator: []numberPointComparator{ @@ -368,7 +369,7 @@ func verifyInfValues(t *testing.T, td *testData, resourceMetrics []*pdata.Resour }, }), assertMetricPresent("redis_connected_clients", - compareMetricType(pdata.MetricDataTypeGauge), + compareMetricType(pmetric.MetricDataTypeGauge), []dataPointExpectation{ { numberPointComparator: []numberPointComparator{ @@ -379,7 +380,7 @@ func verifyInfValues(t *testing.T, td *testData, resourceMetrics []*pdata.Resour }, }), assertMetricPresent("http_requests_total", - compareMetricType(pdata.MetricDataTypeSum), + compareMetricType(pmetric.MetricDataTypeSum), []dataPointExpectation{ { 
numberPointComparator: []numberPointComparator{ @@ -391,7 +392,7 @@ func verifyInfValues(t *testing.T, td *testData, resourceMetrics []*pdata.Resour }, }), assertMetricPresent("rpc_duration_seconds", - compareMetricType(pdata.MetricDataTypeSummary), + compareMetricType(pmetric.MetricDataTypeSummary), []dataPointExpectation{ { summaryPointComparator: []summaryPointComparator{ diff --git a/receiver/prometheusreceiver/metrics_receiver_open_metrics_test.go b/receiver/prometheusreceiver/metrics_receiver_open_metrics_test.go index 6498938eeaa3..4e02d097fedb 100644 --- a/receiver/prometheusreceiver/metrics_receiver_open_metrics_test.go +++ b/receiver/prometheusreceiver/metrics_receiver_open_metrics_test.go @@ -23,7 +23,7 @@ import ( "testing" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" ) const testDir = "./testdata/openmetrics/" @@ -53,7 +53,7 @@ var skippedTests = map[string]struct{}{ "bad_timestamp_4": {}, "bad_timestamp_5": {}, "bad_timestamp_7": {}, "bad_unit_6": {}, "bad_unit_7": {}, } -func verifyPositiveTarget(t *testing.T, _ *testData, mds []*pdata.ResourceMetrics) { +func verifyPositiveTarget(t *testing.T, _ *testData, mds []*pmetric.ResourceMetrics) { require.Greater(t, len(mds), 0, "At least one resource metric should be present") metrics := getMetrics(mds[0]) assertUp(t, 1, metrics) @@ -79,7 +79,7 @@ func TestOpenMetricsPositive(t *testing.T) { } // nolint:unused -func verifyNegativeTarget(t *testing.T, td *testData, mds []*pdata.ResourceMetrics) { +func verifyNegativeTarget(t *testing.T, td *testData, mds []*pmetric.ResourceMetrics) { // failing negative tests are skipped since prometheus scrape package is currently not fully // compatible with OpenMetrics tests and successfully scrapes some invalid metrics // see: https://github.com/prometheus/prometheus/issues/9699 diff --git a/receiver/prometheusreceiver/metrics_receiver_test.go b/receiver/prometheusreceiver/metrics_receiver_test.go index 0267197102c2..ec78af400ac4 100644 --- a/receiver/prometheusreceiver/metrics_receiver_test.go +++ b/receiver/prometheusreceiver/metrics_receiver_test.go @@ -21,7 +21,7 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/prometheus/config" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "google.golang.org/protobuf/types/known/timestamppb" ) @@ -116,7 +116,7 @@ rpc_duration_seconds_sum 4900 rpc_duration_seconds_count 900 ` -func verifyTarget1(t *testing.T, td *testData, resourceMetrics []*pdata.ResourceMetrics) { +func verifyTarget1(t *testing.T, td *testData, resourceMetrics []*pmetric.ResourceMetrics) { verifyNumValidScrapeResults(t, td, resourceMetrics) m1 := resourceMetrics[0] @@ -129,7 +129,7 @@ func verifyTarget1(t *testing.T, td *testData, resourceMetrics []*pdata.Resource ts1 := getTS(metrics1) e1 := []testExpectation{ assertMetricPresent("go_threads", - compareMetricType(pdata.MetricDataTypeGauge), + compareMetricType(pmetric.MetricDataTypeGauge), []dataPointExpectation{ { numberPointComparator: []numberPointComparator{ @@ -139,7 +139,7 @@ func verifyTarget1(t *testing.T, td *testData, resourceMetrics []*pdata.Resource }, }), assertMetricPresent("http_requests_total", - compareMetricType(pdata.MetricDataTypeSum), + compareMetricType(pmetric.MetricDataTypeSum), []dataPointExpectation{ { numberPointComparator: []numberPointComparator{ @@ -159,7 +159,7 @@ func 
verifyTarget1(t *testing.T, td *testData, resourceMetrics []*pdata.Resource }, }), assertMetricPresent("http_request_duration_seconds", - compareMetricType(pdata.MetricDataTypeHistogram), + compareMetricType(pmetric.MetricDataTypeHistogram), []dataPointExpectation{ { histogramPointComparator: []histogramPointComparator{ @@ -170,7 +170,7 @@ func verifyTarget1(t *testing.T, td *testData, resourceMetrics []*pdata.Resource }, }), assertMetricPresent("rpc_duration_seconds", - compareMetricType(pdata.MetricDataTypeSummary), + compareMetricType(pmetric.MetricDataTypeSummary), []dataPointExpectation{ { summaryPointComparator: []summaryPointComparator{ @@ -191,7 +191,7 @@ func verifyTarget1(t *testing.T, td *testData, resourceMetrics []*pdata.Resource ts2 := getTS(metricsScrape2) e2 := []testExpectation{ assertMetricPresent("go_threads", - compareMetricType(pdata.MetricDataTypeGauge), + compareMetricType(pmetric.MetricDataTypeGauge), []dataPointExpectation{ { numberPointComparator: []numberPointComparator{ @@ -201,7 +201,7 @@ func verifyTarget1(t *testing.T, td *testData, resourceMetrics []*pdata.Resource }, }), assertMetricPresent("http_requests_total", - compareMetricType(pdata.MetricDataTypeSum), + compareMetricType(pmetric.MetricDataTypeSum), []dataPointExpectation{ { numberPointComparator: []numberPointComparator{ @@ -221,7 +221,7 @@ func verifyTarget1(t *testing.T, td *testData, resourceMetrics []*pdata.Resource }, }), assertMetricPresent("http_request_duration_seconds", - compareMetricType(pdata.MetricDataTypeHistogram), + compareMetricType(pmetric.MetricDataTypeHistogram), []dataPointExpectation{ { histogramPointComparator: []histogramPointComparator{ @@ -233,7 +233,7 @@ func verifyTarget1(t *testing.T, td *testData, resourceMetrics []*pdata.Resource }, }), assertMetricPresent("rpc_duration_seconds", - compareMetricType(pdata.MetricDataTypeSummary), + compareMetricType(pmetric.MetricDataTypeSummary), []dataPointExpectation{ { summaryPointComparator: []summaryPointComparator{ @@ -254,7 +254,7 @@ func verifyTarget1(t *testing.T, td *testData, resourceMetrics []*pdata.Resource ts3 := getTS(metricsScrape3) e3 := []testExpectation{ assertMetricPresent("go_threads", - compareMetricType(pdata.MetricDataTypeGauge), + compareMetricType(pmetric.MetricDataTypeGauge), []dataPointExpectation{ { numberPointComparator: []numberPointComparator{ @@ -264,7 +264,7 @@ func verifyTarget1(t *testing.T, td *testData, resourceMetrics []*pdata.Resource }, }), assertMetricPresent("http_requests_total", - compareMetricType(pdata.MetricDataTypeSum), + compareMetricType(pmetric.MetricDataTypeSum), []dataPointExpectation{ { numberPointComparator: []numberPointComparator{ @@ -286,7 +286,7 @@ func verifyTarget1(t *testing.T, td *testData, resourceMetrics []*pdata.Resource }, }), assertMetricPresent("http_request_duration_seconds", - compareMetricType(pdata.MetricDataTypeHistogram), + compareMetricType(pmetric.MetricDataTypeHistogram), []dataPointExpectation{ { histogramPointComparator: []histogramPointComparator{ @@ -298,7 +298,7 @@ func verifyTarget1(t *testing.T, td *testData, resourceMetrics []*pdata.Resource }, }), assertMetricPresent("rpc_duration_seconds", - compareMetricType(pdata.MetricDataTypeSummary), + compareMetricType(pmetric.MetricDataTypeSummary), []dataPointExpectation{ { summaryPointComparator: []summaryPointComparator{ @@ -375,7 +375,7 @@ http_requests_total{method="post",code="400"} 59 http_requests_total{method="post",code="500"} 5 ` -func verifyTarget2(t *testing.T, td *testData, resourceMetrics 
[]*pdata.ResourceMetrics) { +func verifyTarget2(t *testing.T, td *testData, resourceMetrics []*pmetric.ResourceMetrics) { verifyNumValidScrapeResults(t, td, resourceMetrics) m1 := resourceMetrics[0] // m1 has 2 metrics + 5 internal scraper metrics @@ -387,7 +387,7 @@ func verifyTarget2(t *testing.T, td *testData, resourceMetrics []*pdata.Resource ts1 := getTS(metrics1) e1 := []testExpectation{ assertMetricPresent("go_threads", - compareMetricType(pdata.MetricDataTypeGauge), + compareMetricType(pmetric.MetricDataTypeGauge), []dataPointExpectation{ { numberPointComparator: []numberPointComparator{ @@ -397,7 +397,7 @@ func verifyTarget2(t *testing.T, td *testData, resourceMetrics []*pdata.Resource }, }), assertMetricPresent("http_requests_total", - compareMetricType(pdata.MetricDataTypeSum), + compareMetricType(pmetric.MetricDataTypeSum), []dataPointExpectation{ { numberPointComparator: []numberPointComparator{ @@ -427,7 +427,7 @@ func verifyTarget2(t *testing.T, td *testData, resourceMetrics []*pdata.Resource ts2 := getTS(metricsScrape2) e2 := []testExpectation{ assertMetricPresent("go_threads", - compareMetricType(pdata.MetricDataTypeGauge), + compareMetricType(pmetric.MetricDataTypeGauge), []dataPointExpectation{ { numberPointComparator: []numberPointComparator{ @@ -437,7 +437,7 @@ func verifyTarget2(t *testing.T, td *testData, resourceMetrics []*pdata.Resource }, }), assertMetricPresent("http_requests_total", - compareMetricType(pdata.MetricDataTypeSum), + compareMetricType(pmetric.MetricDataTypeSum), []dataPointExpectation{ { numberPointComparator: []numberPointComparator{ @@ -475,7 +475,7 @@ func verifyTarget2(t *testing.T, td *testData, resourceMetrics []*pdata.Resource ts3 := getTS(metricsScrape3) e3 := []testExpectation{ assertMetricPresent("go_threads", - compareMetricType(pdata.MetricDataTypeGauge), + compareMetricType(pmetric.MetricDataTypeGauge), []dataPointExpectation{ { numberPointComparator: []numberPointComparator{ @@ -485,7 +485,7 @@ func verifyTarget2(t *testing.T, td *testData, resourceMetrics []*pdata.Resource }, }), assertMetricPresent("http_requests_total", - compareMetricType(pdata.MetricDataTypeSum), + compareMetricType(pmetric.MetricDataTypeSum), []dataPointExpectation{ { numberPointComparator: []numberPointComparator{ @@ -523,7 +523,7 @@ func verifyTarget2(t *testing.T, td *testData, resourceMetrics []*pdata.Resource ts4 := getTS(metricsScrape4) e4 := []testExpectation{ assertMetricPresent("go_threads", - compareMetricType(pdata.MetricDataTypeGauge), + compareMetricType(pmetric.MetricDataTypeGauge), []dataPointExpectation{ { numberPointComparator: []numberPointComparator{ @@ -533,7 +533,7 @@ func verifyTarget2(t *testing.T, td *testData, resourceMetrics []*pdata.Resource }, }), assertMetricPresent("http_requests_total", - compareMetricType(pdata.MetricDataTypeSum), + compareMetricType(pmetric.MetricDataTypeSum), []dataPointExpectation{ { numberPointComparator: []numberPointComparator{ @@ -571,7 +571,7 @@ func verifyTarget2(t *testing.T, td *testData, resourceMetrics []*pdata.Resource ts5 := getTS(metricsScrape5) e5 := []testExpectation{ assertMetricPresent("go_threads", - compareMetricType(pdata.MetricDataTypeGauge), + compareMetricType(pmetric.MetricDataTypeGauge), []dataPointExpectation{ { numberPointComparator: []numberPointComparator{ @@ -581,7 +581,7 @@ func verifyTarget2(t *testing.T, td *testData, resourceMetrics []*pdata.Resource }, }), assertMetricPresent("http_requests_total", - compareMetricType(pdata.MetricDataTypeSum), + 
compareMetricType(pmetric.MetricDataTypeSum), []dataPointExpectation{ { numberPointComparator: []numberPointComparator{ @@ -684,7 +684,7 @@ rpc_duration_seconds_sum{foo="no_quantile"} 101 rpc_duration_seconds_count{foo="no_quantile"} 55 ` -func verifyTarget3(t *testing.T, td *testData, resourceMetrics []*pdata.ResourceMetrics) { +func verifyTarget3(t *testing.T, td *testData, resourceMetrics []*pmetric.ResourceMetrics) { verifyNumValidScrapeResults(t, td, resourceMetrics) m1 := resourceMetrics[0] // m1 has 3 metrics + 5 internal scraper metrics @@ -696,7 +696,7 @@ func verifyTarget3(t *testing.T, td *testData, resourceMetrics []*pdata.Resource ts1 := getTS(metrics1) e1 := []testExpectation{ assertMetricPresent("go_threads", - compareMetricType(pdata.MetricDataTypeGauge), + compareMetricType(pmetric.MetricDataTypeGauge), []dataPointExpectation{ { numberPointComparator: []numberPointComparator{ @@ -706,7 +706,7 @@ func verifyTarget3(t *testing.T, td *testData, resourceMetrics []*pdata.Resource }, }), assertMetricPresent("http_request_duration_seconds", - compareMetricType(pdata.MetricDataTypeHistogram), + compareMetricType(pmetric.MetricDataTypeHistogram), []dataPointExpectation{ { histogramPointComparator: []histogramPointComparator{ @@ -718,7 +718,7 @@ func verifyTarget3(t *testing.T, td *testData, resourceMetrics []*pdata.Resource }), assertMetricAbsent("corrupted_hist"), assertMetricPresent("rpc_duration_seconds", - compareMetricType(pdata.MetricDataTypeSummary), + compareMetricType(pmetric.MetricDataTypeSummary), []dataPointExpectation{ { summaryPointComparator: []summaryPointComparator{ @@ -748,7 +748,7 @@ func verifyTarget3(t *testing.T, td *testData, resourceMetrics []*pdata.Resource ts2 := getTS(metricsScrape2) e2 := []testExpectation{ assertMetricPresent("go_threads", - compareMetricType(pdata.MetricDataTypeGauge), + compareMetricType(pmetric.MetricDataTypeGauge), []dataPointExpectation{ { numberPointComparator: []numberPointComparator{ @@ -758,7 +758,7 @@ func verifyTarget3(t *testing.T, td *testData, resourceMetrics []*pdata.Resource }, }), assertMetricPresent("http_request_duration_seconds", - compareMetricType(pdata.MetricDataTypeHistogram), + compareMetricType(pmetric.MetricDataTypeHistogram), []dataPointExpectation{ { histogramPointComparator: []histogramPointComparator{ @@ -770,7 +770,7 @@ func verifyTarget3(t *testing.T, td *testData, resourceMetrics []*pdata.Resource }), assertMetricAbsent("corrupted_hist"), assertMetricPresent("rpc_duration_seconds", - compareMetricType(pdata.MetricDataTypeSummary), + compareMetricType(pmetric.MetricDataTypeSummary), []dataPointExpectation{ { summaryPointComparator: []summaryPointComparator{ @@ -866,7 +866,7 @@ var startTimeMetricPageStartTimestamp = &timestamppb.Timestamp{Seconds: 400, Nan // 6 metrics + 5 internal metrics const numStartTimeMetricPageTimeseries = 11 -func verifyStartTimeMetricPage(t *testing.T, td *testData, result []*pdata.ResourceMetrics) { +func verifyStartTimeMetricPage(t *testing.T, td *testData, result []*pmetric.ResourceMetrics) { verifyNumValidScrapeResults(t, td, result) numTimeseries := 0 for _, rm := range result { @@ -874,7 +874,7 @@ func verifyStartTimeMetricPage(t *testing.T, td *testData, result []*pdata.Resou for i := 0; i < len(metrics); i++ { timestamp := startTimeMetricPageStartTimestamp switch metrics[i].DataType() { - case pdata.MetricDataTypeGauge: + case pmetric.MetricDataTypeGauge: timestamp = nil for j := 0; j < metrics[i].Gauge().DataPoints().Len(); j++ { time :=
metrics[i].Gauge().DataPoints().At(j).StartTimestamp() @@ -882,19 +882,19 @@ func verifyStartTimeMetricPage(t *testing.T, td *testData, result []*pdata.Resou numTimeseries++ } - case pdata.MetricDataTypeSum: + case pmetric.MetricDataTypeSum: for j := 0; j < metrics[i].Sum().DataPoints().Len(); j++ { assert.Equal(t, timestamp.AsTime(), metrics[i].Sum().DataPoints().At(j).StartTimestamp().AsTime()) numTimeseries++ } - case pdata.MetricDataTypeHistogram: + case pmetric.MetricDataTypeHistogram: for j := 0; j < metrics[i].Histogram().DataPoints().Len(); j++ { assert.Equal(t, timestamp.AsTime(), metrics[i].Histogram().DataPoints().At(j).StartTimestamp().AsTime()) numTimeseries++ } - case pdata.MetricDataTypeSummary: + case pmetric.MetricDataTypeSummary: for j := 0; j < metrics[i].Summary().DataPoints().Len(); j++ { assert.Equal(t, timestamp.AsTime(), metrics[i].Summary().DataPoints().At(j).StartTimestamp().AsTime()) numTimeseries++ @@ -998,7 +998,7 @@ func TestUntypedMetrics(t *testing.T) { } -func verifyUntypedMetrics(t *testing.T, td *testData, resourceMetrics []*pdata.ResourceMetrics) { +func verifyUntypedMetrics(t *testing.T, td *testData, resourceMetrics []*pmetric.ResourceMetrics) { verifyNumValidScrapeResults(t, td, resourceMetrics) m1 := resourceMetrics[0] @@ -1011,7 +1011,7 @@ func verifyUntypedMetrics(t *testing.T, td *testData, resourceMetrics []*pdata.R ts1 := getTS(metrics1) e1 := []testExpectation{ assertMetricPresent("http_requests_total", - compareMetricType(pdata.MetricDataTypeGauge), + compareMetricType(pmetric.MetricDataTypeGauge), []dataPointExpectation{ { numberPointComparator: []numberPointComparator{ @@ -1029,7 +1029,7 @@ func verifyUntypedMetrics(t *testing.T, td *testData, resourceMetrics []*pdata.R }, }), assertMetricPresent("redis_connected_clients", - compareMetricType(pdata.MetricDataTypeGauge), + compareMetricType(pmetric.MetricDataTypeGauge), []dataPointExpectation{ { numberPointComparator: []numberPointComparator{ diff --git a/receiver/prometheusreceiver/metrics_reciever_metric_rename_test.go b/receiver/prometheusreceiver/metrics_reciever_metric_rename_test.go index fbf9e8f6631f..6823fb21f1f3 100644 --- a/receiver/prometheusreceiver/metrics_reciever_metric_rename_test.go +++ b/receiver/prometheusreceiver/metrics_reciever_metric_rename_test.go @@ -21,7 +21,7 @@ import ( promcfg "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/relabel" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" ) var renameMetric = ` @@ -117,7 +117,7 @@ func TestMetricRenamingKeepAction(t *testing.T) { } -func verifyRenameMetric(t *testing.T, td *testData, resourceMetrics []*pdata.ResourceMetrics) { +func verifyRenameMetric(t *testing.T, td *testData, resourceMetrics []*pmetric.ResourceMetrics) { verifyNumValidScrapeResults(t, td, resourceMetrics) m1 := resourceMetrics[0] @@ -130,7 +130,7 @@ func verifyRenameMetric(t *testing.T, td *testData, resourceMetrics []*pdata.Res ts1 := getTS(metrics1) e1 := []testExpectation{ assertMetricPresent("foo", - compareMetricType(pdata.MetricDataTypeGauge), + compareMetricType(pmetric.MetricDataTypeGauge), []dataPointExpectation{ { numberPointComparator: []numberPointComparator{ @@ -149,7 +149,7 @@ func verifyRenameMetric(t *testing.T, td *testData, resourceMetrics []*pdata.Res }), // renaming config converts any metric type to untyped metric, which then gets converted to gauge double type by metric builder 
assertMetricPresent("http_requests_total", - compareMetricType(pdata.MetricDataTypeGauge), + compareMetricType(pmetric.MetricDataTypeGauge), []dataPointExpectation{ { numberPointComparator: []numberPointComparator{ @@ -171,7 +171,7 @@ func verifyRenameMetric(t *testing.T, td *testData, resourceMetrics []*pdata.Res doCompare(t, "scrape-metricRename-1", wantAttributes, m1, e1) } -func verifyRenameMetricKeepAction(t *testing.T, td *testData, resourceMetrics []*pdata.ResourceMetrics) { +func verifyRenameMetricKeepAction(t *testing.T, td *testData, resourceMetrics []*pmetric.ResourceMetrics) { verifyNumValidScrapeResults(t, td, resourceMetrics) m1 := resourceMetrics[0] @@ -184,7 +184,7 @@ func verifyRenameMetricKeepAction(t *testing.T, td *testData, resourceMetrics [] ts1 := getTS(metrics1) e1 := []testExpectation{ assertMetricPresent("rpc_duration_total", - compareMetricType(pdata.MetricDataTypeSum), + compareMetricType(pmetric.MetricDataTypeSum), []dataPointExpectation{ { numberPointComparator: []numberPointComparator{ @@ -278,7 +278,7 @@ func TestLabelRenaming(t *testing.T) { } -func verifyRenameLabel(t *testing.T, td *testData, resourceMetrics []*pdata.ResourceMetrics) { +func verifyRenameLabel(t *testing.T, td *testData, resourceMetrics []*pmetric.ResourceMetrics) { verifyNumValidScrapeResults(t, td, resourceMetrics) m1 := resourceMetrics[0] @@ -291,7 +291,7 @@ func verifyRenameLabel(t *testing.T, td *testData, resourceMetrics []*pdata.Reso ts1 := getTS(metrics1) e1 := []testExpectation{ assertMetricPresent("http_go_threads", - compareMetricType(pdata.MetricDataTypeGauge), + compareMetricType(pmetric.MetricDataTypeGauge), []dataPointExpectation{ { numberPointComparator: []numberPointComparator{ @@ -302,7 +302,7 @@ func verifyRenameLabel(t *testing.T, td *testData, resourceMetrics []*pdata.Reso }, }), assertMetricPresent("http_connected_total", - compareMetricType(pdata.MetricDataTypeSum), + compareMetricType(pmetric.MetricDataTypeSum), []dataPointExpectation{ { numberPointComparator: []numberPointComparator{ @@ -313,7 +313,7 @@ func verifyRenameLabel(t *testing.T, td *testData, resourceMetrics []*pdata.Reso }, }), assertMetricPresent("redis_http_requests_total", - compareMetricType(pdata.MetricDataTypeSum), + compareMetricType(pmetric.MetricDataTypeSum), []dataPointExpectation{ { numberPointComparator: []numberPointComparator{ @@ -333,7 +333,7 @@ func verifyRenameLabel(t *testing.T, td *testData, resourceMetrics []*pdata.Reso }, }), assertMetricPresent("rpc_duration_total", - compareMetricType(pdata.MetricDataTypeSum), + compareMetricType(pmetric.MetricDataTypeSum), []dataPointExpectation{ { numberPointComparator: []numberPointComparator{ @@ -381,7 +381,7 @@ func TestLabelRenamingKeepAction(t *testing.T) { } -func verifyRenameLabelKeepAction(t *testing.T, td *testData, resourceMetrics []*pdata.ResourceMetrics) { +func verifyRenameLabelKeepAction(t *testing.T, td *testData, resourceMetrics []*pmetric.ResourceMetrics) { verifyNumValidScrapeResults(t, td, resourceMetrics) m1 := resourceMetrics[0] @@ -394,7 +394,7 @@ func verifyRenameLabelKeepAction(t *testing.T, td *testData, resourceMetrics []* ts1 := getTS(metrics1) e1 := []testExpectation{ assertMetricPresent("http_go_threads", - compareMetricType(pdata.MetricDataTypeGauge), + compareMetricType(pmetric.MetricDataTypeGauge), []dataPointExpectation{ { numberPointComparator: []numberPointComparator{ @@ -405,7 +405,7 @@ func verifyRenameLabelKeepAction(t *testing.T, td *testData, resourceMetrics []* }, }), 
assertMetricPresent("http_connected_total", - compareMetricType(pdata.MetricDataTypeSum), + compareMetricType(pmetric.MetricDataTypeSum), []dataPointExpectation{ { numberPointComparator: []numberPointComparator{ @@ -416,7 +416,7 @@ func verifyRenameLabelKeepAction(t *testing.T, td *testData, resourceMetrics []* }, }), assertMetricPresent(" Redis connected clients", - compareMetricType(pdata.MetricDataTypeSum), + compareMetricType(pmetric.MetricDataTypeSum), []dataPointExpectation{ { numberPointComparator: []numberPointComparator{ @@ -434,7 +434,7 @@ func verifyRenameLabelKeepAction(t *testing.T, td *testData, resourceMetrics []* }, }), assertMetricPresent("RPC clients", - compareMetricType(pdata.MetricDataTypeSum), + compareMetricType(pmetric.MetricDataTypeSum), []dataPointExpectation{ { numberPointComparator: []numberPointComparator{ diff --git a/receiver/rabbitmqreceiver/go.mod b/receiver/rabbitmqreceiver/go.mod index b189c88ca63f..ec14ff324ef1 100644 --- a/receiver/rabbitmqreceiver/go.mod +++ b/receiver/rabbitmqreceiver/go.mod @@ -5,8 +5,8 @@ go 1.17 require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/scrapertest v0.48.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/multierr v1.8.0 go.uber.org/zap v1.21.0 ) @@ -21,7 +21,7 @@ require ( github.com/golang/protobuf v1.5.2 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/klauspost/compress v1.15.1 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/text v0.2.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect @@ -31,7 +31,6 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rogpeppe/go-internal v1.6.1 // indirect github.com/rs/cors v1.8.2 // indirect - github.com/spf13/cast v1.4.1 // indirect github.com/stretchr/objx v0.1.1 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 // indirect @@ -39,10 +38,8 @@ require ( go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect - golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect golang.org/x/text v0.3.7 // indirect - google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect google.golang.org/grpc v1.45.0 // indirect google.golang.org/protobuf v1.28.0 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect @@ -50,3 +47,5 @@ require ( ) replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/scrapertest => ../../internal/scrapertest + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/receiver/rabbitmqreceiver/go.sum b/receiver/rabbitmqreceiver/go.sum index c11fd57ca165..d3e22cca8022 100644 --- a/receiver/rabbitmqreceiver/go.sum +++ b/receiver/rabbitmqreceiver/go.sum @@ -1,7 +1,6 @@ cloud.google.com/go v0.26.0/go.mod 
h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= @@ -19,13 +18,11 @@ github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZx github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= @@ -37,7 +34,6 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= @@ -122,8 +118,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= 
github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= @@ -170,9 +166,6 @@ github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -190,10 +183,10 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0/go.mod h1:PFmBsWbldL1kiWZk9+0LBZz2brhByaGsvp6pRICMlPE= go.opentelemetry.io/otel v1.6.0/go.mod h1:bfJD2DZVw0LBxghOTlgnlI0CV3hLDu9XF/QKOUXMTQQ= @@ -203,7 +196,7 @@ go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1 go.opentelemetry.io/otel/metric v0.28.0/go.mod h1:TrzsfQAmQaB1PDcdhBauLMk7nyyg9hm+GoQq/ekE9Iw= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= 
go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.0/go.mod h1:qs7BrU5cZ8dXQHBGxHMOxwME/27YH2qEp4/+tZLLwJE= go.opentelemetry.io/otel/trace v1.6.1/go.mod h1:RkFRM1m0puWIq10oxImnGEduNBzxiN7TXluRBtE+5j0= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= @@ -242,8 +235,8 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -268,13 +261,11 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -299,8 +290,6 @@ google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRn google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto 
v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= @@ -310,7 +299,6 @@ google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8 google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -324,7 +312,6 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= diff --git a/receiver/rabbitmqreceiver/internal/metadata/generated_metrics_v2.go b/receiver/rabbitmqreceiver/internal/metadata/generated_metrics_v2.go index 1346b14d971c..6ba10d322da0 100644 --- a/receiver/rabbitmqreceiver/internal/metadata/generated_metrics_v2.go +++ b/receiver/rabbitmqreceiver/internal/metadata/generated_metrics_v2.go @@ -5,7 +5,8 @@ package metadata import ( "time" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" ) // MetricSettings provides common settings for a particular metric. @@ -47,7 +48,7 @@ func DefaultMetricsSettings() MetricsSettings { } type metricRabbitmqConsumerCount struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -57,12 +58,12 @@ func (m *metricRabbitmqConsumerCount) init() { m.data.SetName("rabbitmq.consumer.count") m.data.SetDescription("The number of consumers currently reading from the queue.") m.data.SetUnit("{consumers}") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) } -func (m *metricRabbitmqConsumerCount) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricRabbitmqConsumerCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -80,7 +81,7 @@ func (m *metricRabbitmqConsumerCount) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricRabbitmqConsumerCount) emit(metrics pdata.MetricSlice) { +func (m *metricRabbitmqConsumerCount) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -91,14 +92,14 @@ func (m *metricRabbitmqConsumerCount) emit(metrics pdata.MetricSlice) { func newMetricRabbitmqConsumerCount(settings MetricSettings) metricRabbitmqConsumerCount { m := metricRabbitmqConsumerCount{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricRabbitmqMessageAcknowledged struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -108,12 +109,12 @@ func (m *metricRabbitmqMessageAcknowledged) init() { m.data.SetName("rabbitmq.message.acknowledged") m.data.SetDescription("The number of messages acknowledged by consumers.") m.data.SetUnit("{messages}") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) } -func (m *metricRabbitmqMessageAcknowledged) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricRabbitmqMessageAcknowledged) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -131,7 +132,7 @@ func (m *metricRabbitmqMessageAcknowledged) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricRabbitmqMessageAcknowledged) emit(metrics pdata.MetricSlice) { +func (m *metricRabbitmqMessageAcknowledged) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -142,14 +143,14 @@ func (m *metricRabbitmqMessageAcknowledged) emit(metrics pdata.MetricSlice) { func newMetricRabbitmqMessageAcknowledged(settings MetricSettings) metricRabbitmqMessageAcknowledged { m := metricRabbitmqMessageAcknowledged{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricRabbitmqMessageCurrent struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -159,13 +160,13 @@ func (m *metricRabbitmqMessageCurrent) init() { m.data.SetName("rabbitmq.message.current") m.data.SetDescription("The total number of messages currently in the queue.") m.data.SetUnit("{messages}") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricRabbitmqMessageCurrent) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, messageStateAttributeValue string) { +func (m *metricRabbitmqMessageCurrent) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, messageStateAttributeValue string) { if !m.settings.Enabled { return } @@ -173,7 +174,7 @@ func (m *metricRabbitmqMessageCurrent) recordDataPoint(start pdata.Timestamp, ts dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.MessageState, pdata.NewValueString(messageStateAttributeValue)) + dp.Attributes().Insert(A.MessageState, pcommon.NewValueString(messageStateAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -184,7 +185,7 @@ func (m *metricRabbitmqMessageCurrent) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricRabbitmqMessageCurrent) emit(metrics pdata.MetricSlice) { +func (m *metricRabbitmqMessageCurrent) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -195,14 +196,14 @@ func (m *metricRabbitmqMessageCurrent) emit(metrics pdata.MetricSlice) { func newMetricRabbitmqMessageCurrent(settings MetricSettings) metricRabbitmqMessageCurrent { m := metricRabbitmqMessageCurrent{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricRabbitmqMessageDelivered struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -212,12 +213,12 @@ func (m *metricRabbitmqMessageDelivered) init() { m.data.SetName("rabbitmq.message.delivered") m.data.SetDescription("The number of messages delivered to consumers.") m.data.SetUnit("{messages}") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) } -func (m *metricRabbitmqMessageDelivered) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricRabbitmqMessageDelivered) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -235,7 +236,7 @@ func (m *metricRabbitmqMessageDelivered) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricRabbitmqMessageDelivered) emit(metrics pdata.MetricSlice) { +func (m *metricRabbitmqMessageDelivered) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -246,14 +247,14 @@ func (m *metricRabbitmqMessageDelivered) emit(metrics pdata.MetricSlice) { func newMetricRabbitmqMessageDelivered(settings MetricSettings) metricRabbitmqMessageDelivered { m := metricRabbitmqMessageDelivered{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricRabbitmqMessageDropped struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -263,12 +264,12 @@ func (m *metricRabbitmqMessageDropped) init() { m.data.SetName("rabbitmq.message.dropped") m.data.SetDescription("The number of messages dropped as unroutable.") m.data.SetUnit("{messages}") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) } -func (m *metricRabbitmqMessageDropped) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricRabbitmqMessageDropped) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -286,7 +287,7 @@ func (m *metricRabbitmqMessageDropped) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricRabbitmqMessageDropped) emit(metrics pdata.MetricSlice) { +func (m *metricRabbitmqMessageDropped) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -297,14 +298,14 @@ func (m *metricRabbitmqMessageDropped) emit(metrics pdata.MetricSlice) { func newMetricRabbitmqMessageDropped(settings MetricSettings) metricRabbitmqMessageDropped { m := metricRabbitmqMessageDropped{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricRabbitmqMessagePublished struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -314,12 +315,12 @@ func (m *metricRabbitmqMessagePublished) init() { m.data.SetName("rabbitmq.message.published") m.data.SetDescription("The number of messages published to a queue.") m.data.SetUnit("{messages}") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) } -func (m *metricRabbitmqMessagePublished) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricRabbitmqMessagePublished) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -337,7 +338,7 @@ func (m *metricRabbitmqMessagePublished) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricRabbitmqMessagePublished) emit(metrics pdata.MetricSlice) { +func (m *metricRabbitmqMessagePublished) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -348,7 +349,7 @@ func (m *metricRabbitmqMessagePublished) emit(metrics pdata.MetricSlice) { func newMetricRabbitmqMessagePublished(settings MetricSettings) metricRabbitmqMessagePublished { m := metricRabbitmqMessagePublished{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m @@ -357,10 +358,10 @@ func newMetricRabbitmqMessagePublished(settings MetricSettings) metricRabbitmqMe // MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations // required to produce metric representation defined in metadata and user settings. type MetricsBuilder struct { - startTime pdata.Timestamp // start time that will be applied to all recorded data points. - metricsCapacity int // maximum observed number of metrics per resource. - resourceCapacity int // maximum observed number of resource attributes. - metricsBuffer pdata.Metrics // accumulates metrics data before emitting. + startTime pcommon.Timestamp // start time that will be applied to all recorded data points. + metricsCapacity int // maximum observed number of metrics per resource. + resourceCapacity int // maximum observed number of resource attributes. + metricsBuffer pmetric.Metrics // accumulates metrics data before emitting. 
metricRabbitmqConsumerCount metricRabbitmqConsumerCount metricRabbitmqMessageAcknowledged metricRabbitmqMessageAcknowledged metricRabbitmqMessageCurrent metricRabbitmqMessageCurrent @@ -373,7 +374,7 @@ type MetricsBuilder struct { type metricBuilderOption func(*MetricsBuilder) // WithStartTime sets startTime on the metrics builder. -func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { +func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption { return func(mb *MetricsBuilder) { mb.startTime = startTime } @@ -381,8 +382,8 @@ func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ - startTime: pdata.NewTimestampFromTime(time.Now()), - metricsBuffer: pdata.NewMetrics(), + startTime: pcommon.NewTimestampFromTime(time.Now()), + metricsBuffer: pmetric.NewMetrics(), metricRabbitmqConsumerCount: newMetricRabbitmqConsumerCount(settings.RabbitmqConsumerCount), metricRabbitmqMessageAcknowledged: newMetricRabbitmqMessageAcknowledged(settings.RabbitmqMessageAcknowledged), metricRabbitmqMessageCurrent: newMetricRabbitmqMessageCurrent(settings.RabbitmqMessageCurrent), @@ -397,7 +398,7 @@ func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) } // updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity. -func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { +func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) { if mb.metricsCapacity < rm.ScopeMetrics().At(0).Metrics().Len() { mb.metricsCapacity = rm.ScopeMetrics().At(0).Metrics().Len() } @@ -407,25 +408,25 @@ func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { } // ResourceOption applies changes to provided resource. -type ResourceOption func(pdata.Resource) +type ResourceOption func(pcommon.Resource) // WithRabbitmqNodeName sets provided value as "rabbitmq.node.name" attribute for current resource. func WithRabbitmqNodeName(val string) ResourceOption { - return func(r pdata.Resource) { + return func(r pcommon.Resource) { r.Attributes().UpsertString("rabbitmq.node.name", val) } } // WithRabbitmqQueueName sets provided value as "rabbitmq.queue.name" attribute for current resource. func WithRabbitmqQueueName(val string) ResourceOption { - return func(r pdata.Resource) { + return func(r pcommon.Resource) { r.Attributes().UpsertString("rabbitmq.queue.name", val) } } // WithRabbitmqVhostName sets provided value as "rabbitmq.vhost.name" attribute for current resource. func WithRabbitmqVhostName(val string) ResourceOption { - return func(r pdata.Resource) { + return func(r pcommon.Resource) { r.Attributes().UpsertString("rabbitmq.vhost.name", val) } } @@ -435,7 +436,7 @@ func WithRabbitmqVhostName(val string) ResourceOption { // needs to emit metrics from several resources. Otherwise calling this function is not required, // just `Emit` function can be called instead. Resource attributes should be provided as ResourceOption arguments. 
func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { - rm := pdata.NewResourceMetrics() + rm := pmetric.NewResourceMetrics() rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity) for _, op := range ro { op(rm.Resource()) @@ -458,47 +459,47 @@ func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { // Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for // recording another set of metrics. This function will be responsible for applying all the transformations required to // produce metric representation defined in metadata and user settings, e.g. delta or cumulative. -func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pdata.Metrics { +func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pmetric.Metrics { mb.EmitForResource(ro...) - metrics := pdata.NewMetrics() + metrics := pmetric.NewMetrics() mb.metricsBuffer.MoveTo(metrics) return metrics } // RecordRabbitmqConsumerCountDataPoint adds a data point to rabbitmq.consumer.count metric. -func (mb *MetricsBuilder) RecordRabbitmqConsumerCountDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordRabbitmqConsumerCountDataPoint(ts pcommon.Timestamp, val int64) { mb.metricRabbitmqConsumerCount.recordDataPoint(mb.startTime, ts, val) } // RecordRabbitmqMessageAcknowledgedDataPoint adds a data point to rabbitmq.message.acknowledged metric. -func (mb *MetricsBuilder) RecordRabbitmqMessageAcknowledgedDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordRabbitmqMessageAcknowledgedDataPoint(ts pcommon.Timestamp, val int64) { mb.metricRabbitmqMessageAcknowledged.recordDataPoint(mb.startTime, ts, val) } // RecordRabbitmqMessageCurrentDataPoint adds a data point to rabbitmq.message.current metric. -func (mb *MetricsBuilder) RecordRabbitmqMessageCurrentDataPoint(ts pdata.Timestamp, val int64, messageStateAttributeValue string) { +func (mb *MetricsBuilder) RecordRabbitmqMessageCurrentDataPoint(ts pcommon.Timestamp, val int64, messageStateAttributeValue string) { mb.metricRabbitmqMessageCurrent.recordDataPoint(mb.startTime, ts, val, messageStateAttributeValue) } // RecordRabbitmqMessageDeliveredDataPoint adds a data point to rabbitmq.message.delivered metric. -func (mb *MetricsBuilder) RecordRabbitmqMessageDeliveredDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordRabbitmqMessageDeliveredDataPoint(ts pcommon.Timestamp, val int64) { mb.metricRabbitmqMessageDelivered.recordDataPoint(mb.startTime, ts, val) } // RecordRabbitmqMessageDroppedDataPoint adds a data point to rabbitmq.message.dropped metric. -func (mb *MetricsBuilder) RecordRabbitmqMessageDroppedDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordRabbitmqMessageDroppedDataPoint(ts pcommon.Timestamp, val int64) { mb.metricRabbitmqMessageDropped.recordDataPoint(mb.startTime, ts, val) } // RecordRabbitmqMessagePublishedDataPoint adds a data point to rabbitmq.message.published metric. -func (mb *MetricsBuilder) RecordRabbitmqMessagePublishedDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordRabbitmqMessagePublishedDataPoint(ts pcommon.Timestamp, val int64) { mb.metricRabbitmqMessagePublished.recordDataPoint(mb.startTime, ts, val) } // Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted, // and metrics builder should update its startTime and reset it's internal state accordingly. 
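(Editor's aside, not part of the diff.) To make the type moves concrete: a minimal, self-contained sketch that hand-builds the same shape of payload Emit produces after the migration, i.e. a cumulative monotonic Sum with one int data point, using only the new pcommon and pmetric packages. The queue name and the value 42 are illustrative.

package main

import (
	"fmt"
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	md := pmetric.NewMetrics() // previously pdata.NewMetrics()

	rm := md.ResourceMetrics().AppendEmpty()
	rm.Resource().Attributes().UpsertString("rabbitmq.queue.name", "example-queue")

	m := rm.ScopeMetrics().AppendEmpty().Metrics().AppendEmpty()
	m.SetName("rabbitmq.message.published")
	m.SetUnit("{messages}")
	m.SetDataType(pmetric.MetricDataTypeSum) // previously pdata.MetricDataTypeSum
	m.Sum().SetIsMonotonic(true)
	m.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)

	dp := m.Sum().DataPoints().AppendEmpty()
	now := pcommon.NewTimestampFromTime(time.Now()) // previously pdata.NewTimestampFromTime
	dp.SetStartTimestamp(now)
	dp.SetTimestamp(now)
	dp.SetIntVal(42)

	fmt.Println(md.ResourceMetrics().Len(), m.Sum().DataPoints().Len()) // 1 1
}

Note the split: container and metric types (Metrics, MetricSlice, MetricDataType*) move to pmetric, while shared primitives (Timestamp, Resource, attribute values) move to pcommon.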
func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { - mb.startTime = pdata.NewTimestampFromTime(time.Now()) + mb.startTime = pcommon.NewTimestampFromTime(time.Now()) for _, op := range options { op(mb) } diff --git a/receiver/rabbitmqreceiver/scraper.go b/receiver/rabbitmqreceiver/scraper.go index 3ca16edcd452..01207ae2939e 100644 --- a/receiver/rabbitmqreceiver/scraper.go +++ b/receiver/rabbitmqreceiver/scraper.go @@ -20,7 +20,8 @@ import ( "time" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/rabbitmqreceiver/internal/metadata" @@ -71,18 +72,18 @@ func (r *rabbitmqScraper) start(ctx context.Context, host component.Host) (err e } // scrape collects metrics from the RabbitMQ API -func (r *rabbitmqScraper) scrape(ctx context.Context) (pdata.Metrics, error) { - now := pdata.NewTimestampFromTime(time.Now()) +func (r *rabbitmqScraper) scrape(ctx context.Context) (pmetric.Metrics, error) { + now := pcommon.NewTimestampFromTime(time.Now()) // Validate we don't attempt to scrape without initializing the client if r.client == nil { - return pdata.NewMetrics(), errClientNotInit + return pmetric.NewMetrics(), errClientNotInit } // Get queues for processing queues, err := r.client.GetQueues(ctx) if err != nil { - return pdata.NewMetrics(), err + return pmetric.NewMetrics(), err } // Collect metrics for each queue @@ -95,7 +96,7 @@ func (r *rabbitmqScraper) scrape(ctx context.Context) (pdata.Metrics, error) { } // collectQueue collects metrics -func (r *rabbitmqScraper) collectQueue(queue *models.Queue, now pdata.Timestamp) { +func (r *rabbitmqScraper) collectQueue(queue *models.Queue, now pcommon.Timestamp) { r.mb.RecordRabbitmqConsumerCountDataPoint(now, queue.Consumers) r.mb.RecordRabbitmqMessageCurrentDataPoint(now, queue.UnacknowledgedMessages, metadata.AttributeMessageState.Unacknowledged) r.mb.RecordRabbitmqMessageCurrentDataPoint(now, queue.ReadyMessages, metadata.AttributeMessageState.Ready) diff --git a/receiver/rabbitmqreceiver/scraper_test.go b/receiver/rabbitmqreceiver/scraper_test.go index d9120c0843e1..45a50f68535d 100644 --- a/receiver/rabbitmqreceiver/scraper_test.go +++ b/receiver/rabbitmqreceiver/scraper_test.go @@ -26,7 +26,7 @@ import ( "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/config/confighttp" "go.opentelemetry.io/collector/config/configtls" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/scrapertest" @@ -89,7 +89,7 @@ func TestScaperScrape(t *testing.T) { testCases := []struct { desc string setupMockClient func(t *testing.T) client - expectedMetricGen func(t *testing.T) pdata.Metrics + expectedMetricGen func(t *testing.T) pmetric.Metrics expectedErr error }{ { @@ -97,8 +97,8 @@ func TestScaperScrape(t *testing.T) { setupMockClient: func(t *testing.T) client { return nil }, - expectedMetricGen: func(t *testing.T) pdata.Metrics { - return pdata.NewMetrics() + expectedMetricGen: func(t *testing.T) pmetric.Metrics { + return pmetric.NewMetrics() }, expectedErr: errClientNotInit, }, @@ -109,9 +109,9 @@ func TestScaperScrape(t *testing.T) { mockClient.On("GetQueues", mock.Anything).Return(nil, errors.New("some api error")) return &mockClient }, - 
expectedMetricGen: func(t *testing.T) pdata.Metrics { + expectedMetricGen: func(t *testing.T) pmetric.Metrics { - return pdata.NewMetrics() + return pmetric.NewMetrics() }, expectedErr: errors.New("some api error"), }, @@ -128,7 +128,7 @@ func TestScaperScrape(t *testing.T) { mockClient.On("GetQueues", mock.Anything).Return(queues, nil) return &mockClient }, - expectedMetricGen: func(t *testing.T) pdata.Metrics { + expectedMetricGen: func(t *testing.T) pmetric.Metrics { goldenPath := filepath.Join("testdata", "expected_metrics", "metrics_golden.json") expectedMetrics, err := golden.ReadMetrics(goldenPath) require.NoError(t, err) diff --git a/receiver/receivercreator/go.mod b/receiver/receivercreator/go.mod index d8401d2f8af5..a4ef628c88ac 100644 --- a/receiver/receivercreator/go.mod +++ b/receiver/receivercreator/go.mod @@ -9,8 +9,9 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus v0.48.0 github.com/spf13/cast v1.4.1 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/multierr v1.8.0 go.uber.org/zap v1.21.0 ) @@ -20,7 +21,7 @@ require ( github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.2 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect @@ -42,3 +43,5 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/extension/obse replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus => ../../pkg/translator/opencensus replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal => ../../internal/coreinternal + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/receiver/receivercreator/go.sum b/receiver/receivercreator/go.sum index 543db3ff3919..bc5c29b899f2 100644 --- a/receiver/receivercreator/go.sum +++ b/receiver/receivercreator/go.sum @@ -18,7 +18,7 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.3.0 h1:t/LhUZLVitR1Ow2YOnduCsavhwFUklBMoGVYUCqmCqk= github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -80,7 +80,6 @@ 
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -111,8 +110,8 @@ github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqx github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -181,10 +180,12 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 
h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= @@ -224,7 +225,7 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -248,7 +249,7 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= diff --git a/receiver/receivercreator/resourceenhancer.go b/receiver/receivercreator/resourceenhancer.go index ad9065b7f54f..882bd1d9e12e 100644 --- a/receiver/receivercreator/resourceenhancer.go +++ b/receiver/receivercreator/resourceenhancer.go @@ -19,7 +19,7 @@ import ( "fmt" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer" ) @@ -66,7 +66,7 @@ func (r *resourceEnhancer) Capabilities() consumer.Capabilities { return consumer.Capabilities{MutatesData: true} } -func (r *resourceEnhancer) ConsumeMetrics(ctx context.Context, md pdata.Metrics) error { +func (r *resourceEnhancer) ConsumeMetrics(ctx context.Context, md pmetric.Metrics) error { rm := md.ResourceMetrics() for i := 0; i < rm.Len(); i++ { rms := rm.At(i) diff --git a/receiver/receivercreator/resourceenhancer_test.go b/receiver/receivercreator/resourceenhancer_test.go index 8c16fb31fbe3..dac2af71c96f 100644 --- a/receiver/receivercreator/resourceenhancer_test.go +++ b/receiver/receivercreator/resourceenhancer_test.go @@ -22,7 +22,7 @@ import ( "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumertest" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer" ) @@ -168,14 +168,14 @@ func Test_resourceEnhancer_ConsumeMetrics(t *testing.T) { } type args struct { ctx context.Context 
- md pdata.Metrics + md pmetric.Metrics } tests := []struct { name string fields fields args args wantErr bool - want pdata.Metrics + want pmetric.Metrics }{ { name: "insert", @@ -188,14 +188,14 @@ func Test_resourceEnhancer_ConsumeMetrics(t *testing.T) { }, args: args{ ctx: context.Background(), - md: func() pdata.Metrics { - md := pdata.NewMetrics() + md: func() pmetric.Metrics { + md := pmetric.NewMetrics() md.ResourceMetrics().AppendEmpty() return md }(), }, - want: func() pdata.Metrics { - md := pdata.NewMetrics() + want: func() pmetric.Metrics { + md := pmetric.NewMetrics() attr := md.ResourceMetrics().AppendEmpty().Resource().Attributes() attr.InsertString("key1", "value1") attr.InsertString("key2", "value2") diff --git a/receiver/redisreceiver/go.mod b/receiver/redisreceiver/go.mod index 2e274ca6acc4..0cc363a31d60 100644 --- a/receiver/redisreceiver/go.mod +++ b/receiver/redisreceiver/go.mod @@ -6,8 +6,8 @@ require ( github.com/go-redis/redis/v7 v7.4.1 github.com/open-telemetry/opentelemetry-collector-contrib/internal/containertest v0.48.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/zap v1.21.0 ) @@ -24,7 +24,7 @@ require ( github.com/fsnotify/fsnotify v1.5.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.2 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/pretty v0.3.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect @@ -36,14 +36,13 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/sirupsen/logrus v1.8.1 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect + golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 // indirect google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect google.golang.org/grpc v1.45.0 // indirect google.golang.org/protobuf v1.28.0 // indirect @@ -57,3 +56,5 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/conta // see https://github.com/distribution/distribution/issues/3590 exclude github.com/docker/distribution v2.8.0+incompatible + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/receiver/redisreceiver/go.sum b/receiver/redisreceiver/go.sum index bf02de9264bc..b6d0213c91b2 100644 --- a/receiver/redisreceiver/go.sum +++ b/receiver/redisreceiver/go.sum @@ -454,8 +454,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.13/go.mod 
h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -640,8 +640,6 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasO github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= @@ -708,15 +706,15 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= 
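(Editor's aside, not part of the diff.) The go.mod churn in each module above follows a single pattern: require the new pdata module with a placeholder version and pin the actual commit through a replace directive, as in

require go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000

replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d

Both lines are taken verbatim from the receiver go.mod files in this change; once pdata gets a tagged release, the replace can presumably be dropped in favor of a normal versioned require.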
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= @@ -816,7 +814,7 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -902,8 +900,8 @@ golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/receiver/redisreceiver/internal/metadata/generated_metrics_v2.go b/receiver/redisreceiver/internal/metadata/generated_metrics_v2.go index 7be8287e304e..bdca1a0b00b3 100644 --- a/receiver/redisreceiver/internal/metadata/generated_metrics_v2.go +++ b/receiver/redisreceiver/internal/metadata/generated_metrics_v2.go @@ -5,7 +5,8 @@ package metadata import ( "time" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" ) // MetricSettings provides common settings for a particular metric. @@ -139,7 +140,7 @@ func DefaultMetricsSettings() MetricsSettings { } type metricRedisClientsBlocked struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -149,12 +150,12 @@ func (m *metricRedisClientsBlocked) init() { m.data.SetName("redis.clients.blocked") m.data.SetDescription("Number of clients pending on a blocking call") m.data.SetUnit("") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) } -func (m *metricRedisClientsBlocked) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricRedisClientsBlocked) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -172,7 +173,7 @@ func (m *metricRedisClientsBlocked) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricRedisClientsBlocked) emit(metrics pdata.MetricSlice) { +func (m *metricRedisClientsBlocked) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -183,14 +184,14 @@ func (m *metricRedisClientsBlocked) emit(metrics pdata.MetricSlice) { func newMetricRedisClientsBlocked(settings MetricSettings) metricRedisClientsBlocked { m := metricRedisClientsBlocked{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricRedisClientsConnected struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -200,12 +201,12 @@ func (m *metricRedisClientsConnected) init() { m.data.SetName("redis.clients.connected") m.data.SetDescription("Number of client connections (excluding connections from replicas)") m.data.SetUnit("") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) } -func (m *metricRedisClientsConnected) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricRedisClientsConnected) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -223,7 +224,7 @@ func (m *metricRedisClientsConnected) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricRedisClientsConnected) emit(metrics pdata.MetricSlice) { +func (m *metricRedisClientsConnected) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -234,14 +235,14 @@ func (m *metricRedisClientsConnected) emit(metrics pdata.MetricSlice) { func newMetricRedisClientsConnected(settings MetricSettings) metricRedisClientsConnected { m := metricRedisClientsConnected{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricRedisClientsMaxInputBuffer struct { - data pdata.Metric // data buffer for generated metric. 
+ data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -251,10 +252,10 @@ func (m *metricRedisClientsMaxInputBuffer) init() { m.data.SetName("redis.clients.max_input_buffer") m.data.SetDescription("Biggest input buffer among current client connections") m.data.SetUnit("") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) } -func (m *metricRedisClientsMaxInputBuffer) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricRedisClientsMaxInputBuffer) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -272,7 +273,7 @@ func (m *metricRedisClientsMaxInputBuffer) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricRedisClientsMaxInputBuffer) emit(metrics pdata.MetricSlice) { +func (m *metricRedisClientsMaxInputBuffer) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -283,14 +284,14 @@ func (m *metricRedisClientsMaxInputBuffer) emit(metrics pdata.MetricSlice) { func newMetricRedisClientsMaxInputBuffer(settings MetricSettings) metricRedisClientsMaxInputBuffer { m := metricRedisClientsMaxInputBuffer{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricRedisClientsMaxOutputBuffer struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -300,10 +301,10 @@ func (m *metricRedisClientsMaxOutputBuffer) init() { m.data.SetName("redis.clients.max_output_buffer") m.data.SetDescription("Longest output list among current client connections") m.data.SetUnit("") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) } -func (m *metricRedisClientsMaxOutputBuffer) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricRedisClientsMaxOutputBuffer) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -321,7 +322,7 @@ func (m *metricRedisClientsMaxOutputBuffer) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricRedisClientsMaxOutputBuffer) emit(metrics pdata.MetricSlice) { +func (m *metricRedisClientsMaxOutputBuffer) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -332,14 +333,14 @@ func (m *metricRedisClientsMaxOutputBuffer) emit(metrics pdata.MetricSlice) { func newMetricRedisClientsMaxOutputBuffer(settings MetricSettings) metricRedisClientsMaxOutputBuffer { m := metricRedisClientsMaxOutputBuffer{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricRedisCommands struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. 
settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -349,10 +350,10 @@ func (m *metricRedisCommands) init() { m.data.SetName("redis.commands") m.data.SetDescription("Number of commands processed per second") m.data.SetUnit("{ops}/s") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) } -func (m *metricRedisCommands) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricRedisCommands) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -370,7 +371,7 @@ func (m *metricRedisCommands) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricRedisCommands) emit(metrics pdata.MetricSlice) { +func (m *metricRedisCommands) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -381,14 +382,14 @@ func (m *metricRedisCommands) emit(metrics pdata.MetricSlice) { func newMetricRedisCommands(settings MetricSettings) metricRedisCommands { m := metricRedisCommands{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricRedisCommandsProcessed struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -398,12 +399,12 @@ func (m *metricRedisCommandsProcessed) init() { m.data.SetName("redis.commands.processed") m.data.SetDescription("Total number of commands processed by the server") m.data.SetUnit("") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) } -func (m *metricRedisCommandsProcessed) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricRedisCommandsProcessed) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -421,7 +422,7 @@ func (m *metricRedisCommandsProcessed) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricRedisCommandsProcessed) emit(metrics pdata.MetricSlice) { +func (m *metricRedisCommandsProcessed) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -432,14 +433,14 @@ func (m *metricRedisCommandsProcessed) emit(metrics pdata.MetricSlice) { func newMetricRedisCommandsProcessed(settings MetricSettings) metricRedisCommandsProcessed { m := metricRedisCommandsProcessed{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricRedisConnectionsReceived struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. 
capacity int // max observed number of data points added to the metric. } @@ -449,12 +450,12 @@ func (m *metricRedisConnectionsReceived) init() { m.data.SetName("redis.connections.received") m.data.SetDescription("Total number of connections accepted by the server") m.data.SetUnit("") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) } -func (m *metricRedisConnectionsReceived) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricRedisConnectionsReceived) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -472,7 +473,7 @@ func (m *metricRedisConnectionsReceived) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricRedisConnectionsReceived) emit(metrics pdata.MetricSlice) { +func (m *metricRedisConnectionsReceived) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -483,14 +484,14 @@ func (m *metricRedisConnectionsReceived) emit(metrics pdata.MetricSlice) { func newMetricRedisConnectionsReceived(settings MetricSettings) metricRedisConnectionsReceived { m := metricRedisConnectionsReceived{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricRedisConnectionsRejected struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -500,12 +501,12 @@ func (m *metricRedisConnectionsRejected) init() { m.data.SetName("redis.connections.rejected") m.data.SetDescription("Number of connections rejected because of maxclients limit") m.data.SetUnit("") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) } -func (m *metricRedisConnectionsRejected) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricRedisConnectionsRejected) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -523,7 +524,7 @@ func (m *metricRedisConnectionsRejected) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricRedisConnectionsRejected) emit(metrics pdata.MetricSlice) { +func (m *metricRedisConnectionsRejected) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -534,14 +535,14 @@ func (m *metricRedisConnectionsRejected) emit(metrics pdata.MetricSlice) { func newMetricRedisConnectionsRejected(settings MetricSettings) metricRedisConnectionsRejected { m := metricRedisConnectionsRejected{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricRedisCPUTime struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -551,13 +552,13 @@ func (m *metricRedisCPUTime) init() { m.data.SetName("redis.cpu.time") m.data.SetDescription("System CPU consumed by the Redis server in seconds since server start") m.data.SetUnit("s") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricRedisCPUTime) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64, stateAttributeValue string) { +func (m *metricRedisCPUTime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64, stateAttributeValue string) { if !m.settings.Enabled { return } @@ -565,7 +566,7 @@ func (m *metricRedisCPUTime) recordDataPoint(start pdata.Timestamp, ts pdata.Tim dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetDoubleVal(val) - dp.Attributes().Insert(A.State, pdata.NewValueString(stateAttributeValue)) + dp.Attributes().Insert(A.State, pcommon.NewValueString(stateAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -576,7 +577,7 @@ func (m *metricRedisCPUTime) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricRedisCPUTime) emit(metrics pdata.MetricSlice) { +func (m *metricRedisCPUTime) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -587,14 +588,14 @@ func (m *metricRedisCPUTime) emit(metrics pdata.MetricSlice) { func newMetricRedisCPUTime(settings MetricSettings) metricRedisCPUTime { m := metricRedisCPUTime{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricRedisDbAvgTTL struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -604,11 +605,11 @@ func (m *metricRedisDbAvgTTL) init() { m.data.SetName("redis.db.avg_ttl") m.data.SetDescription("Average keyspace keys TTL") m.data.SetUnit("ms") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricRedisDbAvgTTL) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, dbAttributeValue string) { +func (m *metricRedisDbAvgTTL) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, dbAttributeValue string) { if !m.settings.Enabled { return } @@ -616,7 +617,7 @@ func (m *metricRedisDbAvgTTL) recordDataPoint(start pdata.Timestamp, ts pdata.Ti dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.Db, pdata.NewValueString(dbAttributeValue)) + dp.Attributes().Insert(A.Db, pcommon.NewValueString(dbAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -627,7 +628,7 @@ func (m *metricRedisDbAvgTTL) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricRedisDbAvgTTL) emit(metrics pdata.MetricSlice) { +func (m *metricRedisDbAvgTTL) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -638,14 +639,14 @@ func (m *metricRedisDbAvgTTL) emit(metrics pdata.MetricSlice) { func newMetricRedisDbAvgTTL(settings MetricSettings) metricRedisDbAvgTTL { m := metricRedisDbAvgTTL{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricRedisDbExpires struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -655,11 +656,11 @@ func (m *metricRedisDbExpires) init() { m.data.SetName("redis.db.expires") m.data.SetDescription("Number of keyspace keys with an expiration") m.data.SetUnit("") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricRedisDbExpires) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, dbAttributeValue string) { +func (m *metricRedisDbExpires) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, dbAttributeValue string) { if !m.settings.Enabled { return } @@ -667,7 +668,7 @@ func (m *metricRedisDbExpires) recordDataPoint(start pdata.Timestamp, ts pdata.T dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.Db, pdata.NewValueString(dbAttributeValue)) + dp.Attributes().Insert(A.Db, pcommon.NewValueString(dbAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -678,7 +679,7 @@ func (m *metricRedisDbExpires) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
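(Editor's aside, not part of the diff.) In the attributed helpers above (redis.cpu.time, redis.db.*), the only non-mechanical change is that attribute values are now built with pcommon.NewValueString instead of pdata.NewValueString. A small self-contained sketch of recording one gauge point keyed by a db attribute, in the style of those helpers (metric name, key, and values are illustrative; the generated code uses the A.Db constant for the key):

package main

import (
	"fmt"
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	m := pmetric.NewMetric()
	m.SetName("redis.db.keys")
	m.SetDataType(pmetric.MetricDataTypeGauge)

	dp := m.Gauge().DataPoints().AppendEmpty()
	dp.SetTimestamp(pcommon.NewTimestampFromTime(time.Now()))
	dp.SetIntVal(128)
	// Attribute values now come from pcommon (previously pdata.NewValueString).
	dp.Attributes().Insert("db", pcommon.NewValueString("0"))

	v, _ := dp.Attributes().Get("db")
	fmt.Println(m.Name(), v.StringVal()) // redis.db.keys 0
}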
-func (m *metricRedisDbExpires) emit(metrics pdata.MetricSlice) { +func (m *metricRedisDbExpires) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -689,14 +690,14 @@ func (m *metricRedisDbExpires) emit(metrics pdata.MetricSlice) { func newMetricRedisDbExpires(settings MetricSettings) metricRedisDbExpires { m := metricRedisDbExpires{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricRedisDbKeys struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -706,11 +707,11 @@ func (m *metricRedisDbKeys) init() { m.data.SetName("redis.db.keys") m.data.SetDescription("Number of keyspace keys") m.data.SetUnit("") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricRedisDbKeys) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, dbAttributeValue string) { +func (m *metricRedisDbKeys) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, dbAttributeValue string) { if !m.settings.Enabled { return } @@ -718,7 +719,7 @@ func (m *metricRedisDbKeys) recordDataPoint(start pdata.Timestamp, ts pdata.Time dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.Db, pdata.NewValueString(dbAttributeValue)) + dp.Attributes().Insert(A.Db, pcommon.NewValueString(dbAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -729,7 +730,7 @@ func (m *metricRedisDbKeys) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricRedisDbKeys) emit(metrics pdata.MetricSlice) { +func (m *metricRedisDbKeys) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -740,14 +741,14 @@ func (m *metricRedisDbKeys) emit(metrics pdata.MetricSlice) { func newMetricRedisDbKeys(settings MetricSettings) metricRedisDbKeys { m := metricRedisDbKeys{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricRedisKeysEvicted struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -757,12 +758,12 @@ func (m *metricRedisKeysEvicted) init() { m.data.SetName("redis.keys.evicted") m.data.SetDescription("Number of evicted keys due to maxmemory limit") m.data.SetUnit("") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) } -func (m *metricRedisKeysEvicted) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricRedisKeysEvicted) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -780,7 +781,7 @@ func (m *metricRedisKeysEvicted) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricRedisKeysEvicted) emit(metrics pdata.MetricSlice) { +func (m *metricRedisKeysEvicted) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -791,14 +792,14 @@ func (m *metricRedisKeysEvicted) emit(metrics pdata.MetricSlice) { func newMetricRedisKeysEvicted(settings MetricSettings) metricRedisKeysEvicted { m := metricRedisKeysEvicted{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricRedisKeysExpired struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -808,12 +809,12 @@ func (m *metricRedisKeysExpired) init() { m.data.SetName("redis.keys.expired") m.data.SetDescription("Total number of key expiration events") m.data.SetUnit("") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) } -func (m *metricRedisKeysExpired) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricRedisKeysExpired) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -831,7 +832,7 @@ func (m *metricRedisKeysExpired) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricRedisKeysExpired) emit(metrics pdata.MetricSlice) { +func (m *metricRedisKeysExpired) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -842,14 +843,14 @@ func (m *metricRedisKeysExpired) emit(metrics pdata.MetricSlice) { func newMetricRedisKeysExpired(settings MetricSettings) metricRedisKeysExpired { m := metricRedisKeysExpired{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricRedisKeyspaceHits struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. 
capacity int // max observed number of data points added to the metric. } @@ -859,12 +860,12 @@ func (m *metricRedisKeyspaceHits) init() { m.data.SetName("redis.keyspace.hits") m.data.SetDescription("Number of successful lookup of keys in the main dictionary") m.data.SetUnit("") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) } -func (m *metricRedisKeyspaceHits) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricRedisKeyspaceHits) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -882,7 +883,7 @@ func (m *metricRedisKeyspaceHits) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricRedisKeyspaceHits) emit(metrics pdata.MetricSlice) { +func (m *metricRedisKeyspaceHits) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -893,14 +894,14 @@ func (m *metricRedisKeyspaceHits) emit(metrics pdata.MetricSlice) { func newMetricRedisKeyspaceHits(settings MetricSettings) metricRedisKeyspaceHits { m := metricRedisKeyspaceHits{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricRedisKeyspaceMisses struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -910,12 +911,12 @@ func (m *metricRedisKeyspaceMisses) init() { m.data.SetName("redis.keyspace.misses") m.data.SetDescription("Number of failed lookup of keys in the main dictionary") m.data.SetUnit("") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) } -func (m *metricRedisKeyspaceMisses) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricRedisKeyspaceMisses) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -933,7 +934,7 @@ func (m *metricRedisKeyspaceMisses) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricRedisKeyspaceMisses) emit(metrics pdata.MetricSlice) { +func (m *metricRedisKeyspaceMisses) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -944,14 +945,14 @@ func (m *metricRedisKeyspaceMisses) emit(metrics pdata.MetricSlice) { func newMetricRedisKeyspaceMisses(settings MetricSettings) metricRedisKeyspaceMisses { m := metricRedisKeyspaceMisses{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricRedisLatestFork struct { - data pdata.Metric // data buffer for generated metric. 
+ data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -961,10 +962,10 @@ func (m *metricRedisLatestFork) init() { m.data.SetName("redis.latest_fork") m.data.SetDescription("Duration of the latest fork operation in microseconds") m.data.SetUnit("us") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) } -func (m *metricRedisLatestFork) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricRedisLatestFork) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -982,7 +983,7 @@ func (m *metricRedisLatestFork) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricRedisLatestFork) emit(metrics pdata.MetricSlice) { +func (m *metricRedisLatestFork) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -993,14 +994,14 @@ func (m *metricRedisLatestFork) emit(metrics pdata.MetricSlice) { func newMetricRedisLatestFork(settings MetricSettings) metricRedisLatestFork { m := metricRedisLatestFork{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricRedisMemoryFragmentationRatio struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -1010,10 +1011,10 @@ func (m *metricRedisMemoryFragmentationRatio) init() { m.data.SetName("redis.memory.fragmentation_ratio") m.data.SetDescription("Ratio between used_memory_rss and used_memory") m.data.SetUnit("") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) } -func (m *metricRedisMemoryFragmentationRatio) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val float64) { +func (m *metricRedisMemoryFragmentationRatio) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val float64) { if !m.settings.Enabled { return } @@ -1031,7 +1032,7 @@ func (m *metricRedisMemoryFragmentationRatio) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricRedisMemoryFragmentationRatio) emit(metrics pdata.MetricSlice) { +func (m *metricRedisMemoryFragmentationRatio) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -1042,14 +1043,14 @@ func (m *metricRedisMemoryFragmentationRatio) emit(metrics pdata.MetricSlice) { func newMetricRedisMemoryFragmentationRatio(settings MetricSettings) metricRedisMemoryFragmentationRatio { m := metricRedisMemoryFragmentationRatio{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricRedisMemoryLua struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -1059,10 +1060,10 @@ func (m *metricRedisMemoryLua) init() { m.data.SetName("redis.memory.lua") m.data.SetDescription("Number of bytes used by the Lua engine") m.data.SetUnit("By") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) } -func (m *metricRedisMemoryLua) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricRedisMemoryLua) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -1080,7 +1081,7 @@ func (m *metricRedisMemoryLua) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricRedisMemoryLua) emit(metrics pdata.MetricSlice) { +func (m *metricRedisMemoryLua) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -1091,14 +1092,14 @@ func (m *metricRedisMemoryLua) emit(metrics pdata.MetricSlice) { func newMetricRedisMemoryLua(settings MetricSettings) metricRedisMemoryLua { m := metricRedisMemoryLua{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricRedisMemoryPeak struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -1108,10 +1109,10 @@ func (m *metricRedisMemoryPeak) init() { m.data.SetName("redis.memory.peak") m.data.SetDescription("Peak memory consumed by Redis (in bytes)") m.data.SetUnit("By") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) } -func (m *metricRedisMemoryPeak) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricRedisMemoryPeak) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -1129,7 +1130,7 @@ func (m *metricRedisMemoryPeak) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricRedisMemoryPeak) emit(metrics pdata.MetricSlice) { +func (m *metricRedisMemoryPeak) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -1140,14 +1141,14 @@ func (m *metricRedisMemoryPeak) emit(metrics pdata.MetricSlice) { func newMetricRedisMemoryPeak(settings MetricSettings) metricRedisMemoryPeak { m := metricRedisMemoryPeak{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricRedisMemoryRss struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -1157,10 +1158,10 @@ func (m *metricRedisMemoryRss) init() { m.data.SetName("redis.memory.rss") m.data.SetDescription("Number of bytes that Redis allocated as seen by the operating system") m.data.SetUnit("By") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) } -func (m *metricRedisMemoryRss) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricRedisMemoryRss) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -1178,7 +1179,7 @@ func (m *metricRedisMemoryRss) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricRedisMemoryRss) emit(metrics pdata.MetricSlice) { +func (m *metricRedisMemoryRss) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -1189,14 +1190,14 @@ func (m *metricRedisMemoryRss) emit(metrics pdata.MetricSlice) { func newMetricRedisMemoryRss(settings MetricSettings) metricRedisMemoryRss { m := metricRedisMemoryRss{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricRedisMemoryUsed struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -1206,10 +1207,10 @@ func (m *metricRedisMemoryUsed) init() { m.data.SetName("redis.memory.used") m.data.SetDescription("Total number of bytes allocated by Redis using its allocator") m.data.SetUnit("By") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) } -func (m *metricRedisMemoryUsed) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricRedisMemoryUsed) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -1227,7 +1228,7 @@ func (m *metricRedisMemoryUsed) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricRedisMemoryUsed) emit(metrics pdata.MetricSlice) { +func (m *metricRedisMemoryUsed) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -1238,14 +1239,14 @@ func (m *metricRedisMemoryUsed) emit(metrics pdata.MetricSlice) { func newMetricRedisMemoryUsed(settings MetricSettings) metricRedisMemoryUsed { m := metricRedisMemoryUsed{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricRedisNetInput struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
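// ---------------------------------------------------------------------------
// Illustrative sketch, not part of this diff: every generated metric in this
// file follows the same shape after the pdata split, with metric construction
// coming from go.opentelemetry.io/collector/pdata/pmetric and shared types
// such as Timestamp coming from go.opentelemetry.io/collector/pdata/pcommon.
// The helper names buildCumulativeSum and buildGauge are made up for the
// example; the calls mirror the generated init and recordDataPoint methods.
// ---------------------------------------------------------------------------
package example

import (
    "time"

    "go.opentelemetry.io/collector/pdata/pcommon"
    "go.opentelemetry.io/collector/pdata/pmetric"
)

// buildCumulativeSum builds a monotonic cumulative Sum metric with a single
// int data point, the shape used by counters such as redis.net.input.
func buildCumulativeSum(start, ts pcommon.Timestamp, val int64) pmetric.Metric {
    m := pmetric.NewMetric()
    m.SetName("redis.net.input")
    m.SetUnit("By")
    m.SetDataType(pmetric.MetricDataTypeSum)
    m.Sum().SetIsMonotonic(true)
    m.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)
    dp := m.Sum().DataPoints().AppendEmpty()
    dp.SetStartTimestamp(start)
    dp.SetTimestamp(ts)
    dp.SetIntVal(val)
    return m
}

// buildGauge builds a Gauge metric with a single double data point, the shape
// used by point-in-time values such as redis.memory.fragmentation_ratio.
func buildGauge(ts pcommon.Timestamp, val float64) pmetric.Metric {
    m := pmetric.NewMetric()
    m.SetName("redis.memory.fragmentation_ratio")
    m.SetDataType(pmetric.MetricDataTypeGauge)
    dp := m.Gauge().DataPoints().AppendEmpty()
    dp.SetTimestamp(ts)
    dp.SetDoubleVal(val)
    return m
}

// Timestamps come from pcommon rather than pmetric; both helpers above accept
// values built this way.
var exampleNow = pcommon.NewTimestampFromTime(time.Now())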
} @@ -1255,12 +1256,12 @@ func (m *metricRedisNetInput) init() { m.data.SetName("redis.net.input") m.data.SetDescription("The total number of bytes read from the network") m.data.SetUnit("By") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) } -func (m *metricRedisNetInput) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricRedisNetInput) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -1278,7 +1279,7 @@ func (m *metricRedisNetInput) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricRedisNetInput) emit(metrics pdata.MetricSlice) { +func (m *metricRedisNetInput) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -1289,14 +1290,14 @@ func (m *metricRedisNetInput) emit(metrics pdata.MetricSlice) { func newMetricRedisNetInput(settings MetricSettings) metricRedisNetInput { m := metricRedisNetInput{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricRedisNetOutput struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -1306,12 +1307,12 @@ func (m *metricRedisNetOutput) init() { m.data.SetName("redis.net.output") m.data.SetDescription("The total number of bytes written to the network") m.data.SetUnit("By") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) } -func (m *metricRedisNetOutput) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricRedisNetOutput) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -1329,7 +1330,7 @@ func (m *metricRedisNetOutput) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricRedisNetOutput) emit(metrics pdata.MetricSlice) { +func (m *metricRedisNetOutput) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -1340,14 +1341,14 @@ func (m *metricRedisNetOutput) emit(metrics pdata.MetricSlice) { func newMetricRedisNetOutput(settings MetricSettings) metricRedisNetOutput { m := metricRedisNetOutput{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricRedisRdbChangesSinceLastSave struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. 
capacity int // max observed number of data points added to the metric. } @@ -1357,12 +1358,12 @@ func (m *metricRedisRdbChangesSinceLastSave) init() { m.data.SetName("redis.rdb.changes_since_last_save") m.data.SetDescription("Number of changes since the last dump") m.data.SetUnit("") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) } -func (m *metricRedisRdbChangesSinceLastSave) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricRedisRdbChangesSinceLastSave) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -1380,7 +1381,7 @@ func (m *metricRedisRdbChangesSinceLastSave) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricRedisRdbChangesSinceLastSave) emit(metrics pdata.MetricSlice) { +func (m *metricRedisRdbChangesSinceLastSave) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -1391,14 +1392,14 @@ func (m *metricRedisRdbChangesSinceLastSave) emit(metrics pdata.MetricSlice) { func newMetricRedisRdbChangesSinceLastSave(settings MetricSettings) metricRedisRdbChangesSinceLastSave { m := metricRedisRdbChangesSinceLastSave{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricRedisReplicationBacklogFirstByteOffset struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -1408,10 +1409,10 @@ func (m *metricRedisReplicationBacklogFirstByteOffset) init() { m.data.SetName("redis.replication.backlog_first_byte_offset") m.data.SetDescription("The master offset of the replication backlog buffer") m.data.SetUnit("") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) } -func (m *metricRedisReplicationBacklogFirstByteOffset) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricRedisReplicationBacklogFirstByteOffset) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -1429,7 +1430,7 @@ func (m *metricRedisReplicationBacklogFirstByteOffset) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricRedisReplicationBacklogFirstByteOffset) emit(metrics pdata.MetricSlice) { +func (m *metricRedisReplicationBacklogFirstByteOffset) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -1440,14 +1441,14 @@ func (m *metricRedisReplicationBacklogFirstByteOffset) emit(metrics pdata.Metric func newMetricRedisReplicationBacklogFirstByteOffset(settings MetricSettings) metricRedisReplicationBacklogFirstByteOffset { m := metricRedisReplicationBacklogFirstByteOffset{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricRedisReplicationOffset struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -1457,10 +1458,10 @@ func (m *metricRedisReplicationOffset) init() { m.data.SetName("redis.replication.offset") m.data.SetDescription("The server's current replication offset") m.data.SetUnit("") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) } -func (m *metricRedisReplicationOffset) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricRedisReplicationOffset) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -1478,7 +1479,7 @@ func (m *metricRedisReplicationOffset) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricRedisReplicationOffset) emit(metrics pdata.MetricSlice) { +func (m *metricRedisReplicationOffset) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -1489,14 +1490,14 @@ func (m *metricRedisReplicationOffset) emit(metrics pdata.MetricSlice) { func newMetricRedisReplicationOffset(settings MetricSettings) metricRedisReplicationOffset { m := metricRedisReplicationOffset{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricRedisSlavesConnected struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
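// ---------------------------------------------------------------------------
// Illustrative sketch, not part of this diff: the emit methods above hand the
// accumulated data over to a pmetric.MetricSlice and then reinitialize their
// buffer, because MoveTo resets the source metric. flushSum is a hypothetical
// stand-in for that pattern and assumes m was initialized as a Sum metric.
// ---------------------------------------------------------------------------
package example

import "go.opentelemetry.io/collector/pdata/pmetric"

// flushSum appends m to out only when it carries data points, mirroring the
// guard in the generated emit methods.
func flushSum(m pmetric.Metric, out pmetric.MetricSlice) {
    if m.Sum().DataPoints().Len() > 0 {
        m.MoveTo(out.AppendEmpty())
    }
}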
} @@ -1506,12 +1507,12 @@ func (m *metricRedisSlavesConnected) init() { m.data.SetName("redis.slaves.connected") m.data.SetDescription("Number of connected replicas") m.data.SetUnit("") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) } -func (m *metricRedisSlavesConnected) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricRedisSlavesConnected) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -1529,7 +1530,7 @@ func (m *metricRedisSlavesConnected) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricRedisSlavesConnected) emit(metrics pdata.MetricSlice) { +func (m *metricRedisSlavesConnected) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -1540,14 +1541,14 @@ func (m *metricRedisSlavesConnected) emit(metrics pdata.MetricSlice) { func newMetricRedisSlavesConnected(settings MetricSettings) metricRedisSlavesConnected { m := metricRedisSlavesConnected{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricRedisUptime struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -1557,12 +1558,12 @@ func (m *metricRedisUptime) init() { m.data.SetName("redis.uptime") m.data.SetDescription("Number of seconds since Redis server start") m.data.SetUnit("s") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) } -func (m *metricRedisUptime) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricRedisUptime) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -1580,7 +1581,7 @@ func (m *metricRedisUptime) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricRedisUptime) emit(metrics pdata.MetricSlice) { +func (m *metricRedisUptime) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -1591,7 +1592,7 @@ func (m *metricRedisUptime) emit(metrics pdata.MetricSlice) { func newMetricRedisUptime(settings MetricSettings) metricRedisUptime { m := metricRedisUptime{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m @@ -1600,10 +1601,10 @@ func newMetricRedisUptime(settings MetricSettings) metricRedisUptime { // MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations // required to produce metric representation defined in metadata and user settings. type MetricsBuilder struct { - startTime pdata.Timestamp // start time that will be applied to all recorded data points. - metricsCapacity int // maximum observed number of metrics per resource. - resourceCapacity int // maximum observed number of resource attributes. - metricsBuffer pdata.Metrics // accumulates metrics data before emitting. + startTime pcommon.Timestamp // start time that will be applied to all recorded data points. + metricsCapacity int // maximum observed number of metrics per resource. + resourceCapacity int // maximum observed number of resource attributes. + metricsBuffer pmetric.Metrics // accumulates metrics data before emitting. metricRedisClientsBlocked metricRedisClientsBlocked metricRedisClientsConnected metricRedisClientsConnected metricRedisClientsMaxInputBuffer metricRedisClientsMaxInputBuffer @@ -1639,7 +1640,7 @@ type MetricsBuilder struct { type metricBuilderOption func(*MetricsBuilder) // WithStartTime sets startTime on the metrics builder. -func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { +func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption { return func(mb *MetricsBuilder) { mb.startTime = startTime } @@ -1647,8 +1648,8 @@ func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ - startTime: pdata.NewTimestampFromTime(time.Now()), - metricsBuffer: pdata.NewMetrics(), + startTime: pcommon.NewTimestampFromTime(time.Now()), + metricsBuffer: pmetric.NewMetrics(), metricRedisClientsBlocked: newMetricRedisClientsBlocked(settings.RedisClientsBlocked), metricRedisClientsConnected: newMetricRedisClientsConnected(settings.RedisClientsConnected), metricRedisClientsMaxInputBuffer: newMetricRedisClientsMaxInputBuffer(settings.RedisClientsMaxInputBuffer), @@ -1686,7 +1687,7 @@ func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) } // updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity. -func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { +func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) { if mb.metricsCapacity < rm.ScopeMetrics().At(0).Metrics().Len() { mb.metricsCapacity = rm.ScopeMetrics().At(0).Metrics().Len() } @@ -1696,14 +1697,14 @@ func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { } // ResourceOption applies changes to provided resource. 
-type ResourceOption func(pdata.Resource) +type ResourceOption func(pcommon.Resource) // EmitForResource saves all the generated metrics under a new resource and updates the internal state to be ready for // recording another set of data points as part of another resource. This function can be helpful when one scraper // needs to emit metrics from several resources. Otherwise calling this function is not required, // just `Emit` function can be called instead. Resource attributes should be provided as ResourceOption arguments. func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { - rm := pdata.NewResourceMetrics() + rm := pmetric.NewResourceMetrics() rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity) for _, op := range ro { op(rm.Resource()) @@ -1749,162 +1750,162 @@ func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { // Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for // recording another set of metrics. This function will be responsible for applying all the transformations required to // produce metric representation defined in metadata and user settings, e.g. delta or cumulative. -func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pdata.Metrics { +func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pmetric.Metrics { mb.EmitForResource(ro...) - metrics := pdata.NewMetrics() + metrics := pmetric.NewMetrics() mb.metricsBuffer.MoveTo(metrics) return metrics } // RecordRedisClientsBlockedDataPoint adds a data point to redis.clients.blocked metric. -func (mb *MetricsBuilder) RecordRedisClientsBlockedDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordRedisClientsBlockedDataPoint(ts pcommon.Timestamp, val int64) { mb.metricRedisClientsBlocked.recordDataPoint(mb.startTime, ts, val) } // RecordRedisClientsConnectedDataPoint adds a data point to redis.clients.connected metric. -func (mb *MetricsBuilder) RecordRedisClientsConnectedDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordRedisClientsConnectedDataPoint(ts pcommon.Timestamp, val int64) { mb.metricRedisClientsConnected.recordDataPoint(mb.startTime, ts, val) } // RecordRedisClientsMaxInputBufferDataPoint adds a data point to redis.clients.max_input_buffer metric. -func (mb *MetricsBuilder) RecordRedisClientsMaxInputBufferDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordRedisClientsMaxInputBufferDataPoint(ts pcommon.Timestamp, val int64) { mb.metricRedisClientsMaxInputBuffer.recordDataPoint(mb.startTime, ts, val) } // RecordRedisClientsMaxOutputBufferDataPoint adds a data point to redis.clients.max_output_buffer metric. -func (mb *MetricsBuilder) RecordRedisClientsMaxOutputBufferDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordRedisClientsMaxOutputBufferDataPoint(ts pcommon.Timestamp, val int64) { mb.metricRedisClientsMaxOutputBuffer.recordDataPoint(mb.startTime, ts, val) } // RecordRedisCommandsDataPoint adds a data point to redis.commands metric. -func (mb *MetricsBuilder) RecordRedisCommandsDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordRedisCommandsDataPoint(ts pcommon.Timestamp, val int64) { mb.metricRedisCommands.recordDataPoint(mb.startTime, ts, val) } // RecordRedisCommandsProcessedDataPoint adds a data point to redis.commands.processed metric. 
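// ---------------------------------------------------------------------------
// Illustrative sketch, not part of this diff: how a scraper drives the
// generated MetricsBuilder after the migration. It assumes the sketch lives
// inside the redisreceiver module (metadata is an internal package) and that
// the package exposes DefaultMetricsSettings, as the riak equivalent below
// does; the resource attribute key "redis.instance" is made up.
// ---------------------------------------------------------------------------
package redisreceiver_example

import (
    "time"

    "go.opentelemetry.io/collector/pdata/pcommon"
    "go.opentelemetry.io/collector/pdata/pmetric"

    "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/redisreceiver/internal/metadata"
)

func buildExampleMetrics() pmetric.Metrics {
    // WithStartTime fixes the start timestamp applied to every cumulative data
    // point recorded through the builder.
    mb := metadata.NewMetricsBuilder(
        metadata.DefaultMetricsSettings(),
        metadata.WithStartTime(pcommon.NewTimestampFromTime(time.Now().Add(-time.Hour))),
    )

    now := pcommon.NewTimestampFromTime(time.Now())
    mb.RecordRedisUptimeDataPoint(now, 3600)
    mb.RecordRedisKeyspaceHitsDataPoint(now, 42)
    mb.RecordRedisCPUTimeDataPoint(now, 1.5, "sys")

    // Emit wraps everything recorded so far under one resource; a plain
    // ResourceOption closure can decorate that resource before it is returned.
    return mb.Emit(func(r pcommon.Resource) {
        r.Attributes().Insert("redis.instance", pcommon.NewValueString("cache-0"))
    })
}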
-func (mb *MetricsBuilder) RecordRedisCommandsProcessedDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordRedisCommandsProcessedDataPoint(ts pcommon.Timestamp, val int64) { mb.metricRedisCommandsProcessed.recordDataPoint(mb.startTime, ts, val) } // RecordRedisConnectionsReceivedDataPoint adds a data point to redis.connections.received metric. -func (mb *MetricsBuilder) RecordRedisConnectionsReceivedDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordRedisConnectionsReceivedDataPoint(ts pcommon.Timestamp, val int64) { mb.metricRedisConnectionsReceived.recordDataPoint(mb.startTime, ts, val) } // RecordRedisConnectionsRejectedDataPoint adds a data point to redis.connections.rejected metric. -func (mb *MetricsBuilder) RecordRedisConnectionsRejectedDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordRedisConnectionsRejectedDataPoint(ts pcommon.Timestamp, val int64) { mb.metricRedisConnectionsRejected.recordDataPoint(mb.startTime, ts, val) } // RecordRedisCPUTimeDataPoint adds a data point to redis.cpu.time metric. -func (mb *MetricsBuilder) RecordRedisCPUTimeDataPoint(ts pdata.Timestamp, val float64, stateAttributeValue string) { +func (mb *MetricsBuilder) RecordRedisCPUTimeDataPoint(ts pcommon.Timestamp, val float64, stateAttributeValue string) { mb.metricRedisCPUTime.recordDataPoint(mb.startTime, ts, val, stateAttributeValue) } // RecordRedisDbAvgTTLDataPoint adds a data point to redis.db.avg_ttl metric. -func (mb *MetricsBuilder) RecordRedisDbAvgTTLDataPoint(ts pdata.Timestamp, val int64, dbAttributeValue string) { +func (mb *MetricsBuilder) RecordRedisDbAvgTTLDataPoint(ts pcommon.Timestamp, val int64, dbAttributeValue string) { mb.metricRedisDbAvgTTL.recordDataPoint(mb.startTime, ts, val, dbAttributeValue) } // RecordRedisDbExpiresDataPoint adds a data point to redis.db.expires metric. -func (mb *MetricsBuilder) RecordRedisDbExpiresDataPoint(ts pdata.Timestamp, val int64, dbAttributeValue string) { +func (mb *MetricsBuilder) RecordRedisDbExpiresDataPoint(ts pcommon.Timestamp, val int64, dbAttributeValue string) { mb.metricRedisDbExpires.recordDataPoint(mb.startTime, ts, val, dbAttributeValue) } // RecordRedisDbKeysDataPoint adds a data point to redis.db.keys metric. -func (mb *MetricsBuilder) RecordRedisDbKeysDataPoint(ts pdata.Timestamp, val int64, dbAttributeValue string) { +func (mb *MetricsBuilder) RecordRedisDbKeysDataPoint(ts pcommon.Timestamp, val int64, dbAttributeValue string) { mb.metricRedisDbKeys.recordDataPoint(mb.startTime, ts, val, dbAttributeValue) } // RecordRedisKeysEvictedDataPoint adds a data point to redis.keys.evicted metric. -func (mb *MetricsBuilder) RecordRedisKeysEvictedDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordRedisKeysEvictedDataPoint(ts pcommon.Timestamp, val int64) { mb.metricRedisKeysEvicted.recordDataPoint(mb.startTime, ts, val) } // RecordRedisKeysExpiredDataPoint adds a data point to redis.keys.expired metric. -func (mb *MetricsBuilder) RecordRedisKeysExpiredDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordRedisKeysExpiredDataPoint(ts pcommon.Timestamp, val int64) { mb.metricRedisKeysExpired.recordDataPoint(mb.startTime, ts, val) } // RecordRedisKeyspaceHitsDataPoint adds a data point to redis.keyspace.hits metric. 
-func (mb *MetricsBuilder) RecordRedisKeyspaceHitsDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordRedisKeyspaceHitsDataPoint(ts pcommon.Timestamp, val int64) { mb.metricRedisKeyspaceHits.recordDataPoint(mb.startTime, ts, val) } // RecordRedisKeyspaceMissesDataPoint adds a data point to redis.keyspace.misses metric. -func (mb *MetricsBuilder) RecordRedisKeyspaceMissesDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordRedisKeyspaceMissesDataPoint(ts pcommon.Timestamp, val int64) { mb.metricRedisKeyspaceMisses.recordDataPoint(mb.startTime, ts, val) } // RecordRedisLatestForkDataPoint adds a data point to redis.latest_fork metric. -func (mb *MetricsBuilder) RecordRedisLatestForkDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordRedisLatestForkDataPoint(ts pcommon.Timestamp, val int64) { mb.metricRedisLatestFork.recordDataPoint(mb.startTime, ts, val) } // RecordRedisMemoryFragmentationRatioDataPoint adds a data point to redis.memory.fragmentation_ratio metric. -func (mb *MetricsBuilder) RecordRedisMemoryFragmentationRatioDataPoint(ts pdata.Timestamp, val float64) { +func (mb *MetricsBuilder) RecordRedisMemoryFragmentationRatioDataPoint(ts pcommon.Timestamp, val float64) { mb.metricRedisMemoryFragmentationRatio.recordDataPoint(mb.startTime, ts, val) } // RecordRedisMemoryLuaDataPoint adds a data point to redis.memory.lua metric. -func (mb *MetricsBuilder) RecordRedisMemoryLuaDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordRedisMemoryLuaDataPoint(ts pcommon.Timestamp, val int64) { mb.metricRedisMemoryLua.recordDataPoint(mb.startTime, ts, val) } // RecordRedisMemoryPeakDataPoint adds a data point to redis.memory.peak metric. -func (mb *MetricsBuilder) RecordRedisMemoryPeakDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordRedisMemoryPeakDataPoint(ts pcommon.Timestamp, val int64) { mb.metricRedisMemoryPeak.recordDataPoint(mb.startTime, ts, val) } // RecordRedisMemoryRssDataPoint adds a data point to redis.memory.rss metric. -func (mb *MetricsBuilder) RecordRedisMemoryRssDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordRedisMemoryRssDataPoint(ts pcommon.Timestamp, val int64) { mb.metricRedisMemoryRss.recordDataPoint(mb.startTime, ts, val) } // RecordRedisMemoryUsedDataPoint adds a data point to redis.memory.used metric. -func (mb *MetricsBuilder) RecordRedisMemoryUsedDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordRedisMemoryUsedDataPoint(ts pcommon.Timestamp, val int64) { mb.metricRedisMemoryUsed.recordDataPoint(mb.startTime, ts, val) } // RecordRedisNetInputDataPoint adds a data point to redis.net.input metric. -func (mb *MetricsBuilder) RecordRedisNetInputDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordRedisNetInputDataPoint(ts pcommon.Timestamp, val int64) { mb.metricRedisNetInput.recordDataPoint(mb.startTime, ts, val) } // RecordRedisNetOutputDataPoint adds a data point to redis.net.output metric. -func (mb *MetricsBuilder) RecordRedisNetOutputDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordRedisNetOutputDataPoint(ts pcommon.Timestamp, val int64) { mb.metricRedisNetOutput.recordDataPoint(mb.startTime, ts, val) } // RecordRedisRdbChangesSinceLastSaveDataPoint adds a data point to redis.rdb.changes_since_last_save metric. 
-func (mb *MetricsBuilder) RecordRedisRdbChangesSinceLastSaveDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordRedisRdbChangesSinceLastSaveDataPoint(ts pcommon.Timestamp, val int64) { mb.metricRedisRdbChangesSinceLastSave.recordDataPoint(mb.startTime, ts, val) } // RecordRedisReplicationBacklogFirstByteOffsetDataPoint adds a data point to redis.replication.backlog_first_byte_offset metric. -func (mb *MetricsBuilder) RecordRedisReplicationBacklogFirstByteOffsetDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordRedisReplicationBacklogFirstByteOffsetDataPoint(ts pcommon.Timestamp, val int64) { mb.metricRedisReplicationBacklogFirstByteOffset.recordDataPoint(mb.startTime, ts, val) } // RecordRedisReplicationOffsetDataPoint adds a data point to redis.replication.offset metric. -func (mb *MetricsBuilder) RecordRedisReplicationOffsetDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordRedisReplicationOffsetDataPoint(ts pcommon.Timestamp, val int64) { mb.metricRedisReplicationOffset.recordDataPoint(mb.startTime, ts, val) } // RecordRedisSlavesConnectedDataPoint adds a data point to redis.slaves.connected metric. -func (mb *MetricsBuilder) RecordRedisSlavesConnectedDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordRedisSlavesConnectedDataPoint(ts pcommon.Timestamp, val int64) { mb.metricRedisSlavesConnected.recordDataPoint(mb.startTime, ts, val) } // RecordRedisUptimeDataPoint adds a data point to redis.uptime metric. -func (mb *MetricsBuilder) RecordRedisUptimeDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordRedisUptimeDataPoint(ts pcommon.Timestamp, val int64) { mb.metricRedisUptime.recordDataPoint(mb.startTime, ts, val) } // Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted, // and metrics builder should update its startTime and reset it's internal state accordingly. func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { - mb.startTime = pdata.NewTimestampFromTime(time.Now()) + mb.startTime = pcommon.NewTimestampFromTime(time.Now()) for _, op := range options { op(mb) } diff --git a/receiver/redisreceiver/metric_functions.go b/receiver/redisreceiver/metric_functions.go index ad5c66a55e4f..4ea776fee802 100644 --- a/receiver/redisreceiver/metric_functions.go +++ b/receiver/redisreceiver/metric_functions.go @@ -14,9 +14,7 @@ package redisreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/redisreceiver" -import ( - "go.opentelemetry.io/collector/model/pdata" -) +import "go.opentelemetry.io/collector/pdata/pcommon" // dataPointRecorders is called once at startup. Returns recorders for all metrics (except keyspace) // we want to extract from Redis INFO. 
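// ---------------------------------------------------------------------------
// Illustrative sketch, not part of this diff: dataPointRecorders maps Redis
// INFO field names to small typed recorder functions, and it is exactly those
// signatures that move from pdata.Timestamp to pcommon.Timestamp here. The map
// below is a trimmed, hypothetical excerpt; the two keys are assumed from the
// used_cpu_sys wrapper and the uptime handling elsewhere in this change, not
// copied from the full recorder table.
// ---------------------------------------------------------------------------
package example

import "go.opentelemetry.io/collector/pdata/pcommon"

// exampleRecorders mirrors the shape of redisScraper.dataPointRecorders: the
// values keep their unnamed func types so that recordCommonMetrics can later
// distinguish int64 and float64 recorders with a type switch.
func exampleRecorders(
    recordCPUSys func(pcommon.Timestamp, float64),
    recordUptime func(pcommon.Timestamp, int64),
) map[string]interface{} {
    return map[string]interface{}{
        "used_cpu_sys":      recordCPUSys, // float64-valued INFO field
        "uptime_in_seconds": recordUptime, // int64-valued INFO field
    }
}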
@@ -53,14 +51,14 @@ func (rs *redisScraper) dataPointRecorders() map[string]interface{} { } } -func (rs *redisScraper) recordUsedCPUSys(now pdata.Timestamp, val float64) { +func (rs *redisScraper) recordUsedCPUSys(now pcommon.Timestamp, val float64) { rs.mb.RecordRedisCPUTimeDataPoint(now, val, "sys") } -func (rs *redisScraper) recordUsedCPUSysChildren(now pdata.Timestamp, val float64) { +func (rs *redisScraper) recordUsedCPUSysChildren(now pcommon.Timestamp, val float64) { rs.mb.RecordRedisCPUTimeDataPoint(now, val, "children") } -func (rs *redisScraper) recordUsedCPUSysUser(now pdata.Timestamp, val float64) { +func (rs *redisScraper) recordUsedCPUSysUser(now pcommon.Timestamp, val float64) { rs.mb.RecordRedisCPUTimeDataPoint(now, val, "user") } diff --git a/receiver/redisreceiver/metric_functions_test.go b/receiver/redisreceiver/metric_functions_test.go index 0aaec18a7240..2b7bc454896f 100644 --- a/receiver/redisreceiver/metric_functions_test.go +++ b/receiver/redisreceiver/metric_functions_test.go @@ -21,7 +21,7 @@ import ( "github.com/stretchr/testify/assert" "go.opentelemetry.io/collector/component/componenttest" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/redisreceiver/internal/metadata" @@ -39,7 +39,7 @@ func TestDataPointRecorders(t *testing.T) { metricByRecorder := map[string]string{} for metric, recorder := range rs.dataPointRecorders() { switch recorder.(type) { - case func(pdata.Timestamp, int64), func(pdata.Timestamp, float64): + case func(pcommon.Timestamp, int64), func(pcommon.Timestamp, float64): recorderName := runtime.FuncForPC(reflect.ValueOf(recorder).Pointer()).Name() if m, ok := metricByRecorder[recorderName]; ok { assert.Failf(t, "shared-recorder", "Metrics %q and %q share the same recorder", metric, m) diff --git a/receiver/redisreceiver/redis_scraper.go b/receiver/redisreceiver/redis_scraper.go index 0ff84bb72073..29d357ffeee0 100644 --- a/receiver/redisreceiver/redis_scraper.go +++ b/receiver/redisreceiver/redis_scraper.go @@ -21,7 +21,8 @@ import ( "github.com/go-redis/redis/v7" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/receiver/scraperhelper" "go.uber.org/zap" @@ -67,20 +68,20 @@ func newRedisScraperWithClient(client client, settings component.ReceiverCreateS // defined at startup time. Then builds 'keyspace' metrics if there are any // keyspace lines returned by Redis. There should be one keyspace line per // active Redis database, of which there can be 16. 
-func (rs *redisScraper) Scrape(context.Context) (pdata.Metrics, error) { +func (rs *redisScraper) Scrape(context.Context) (pmetric.Metrics, error) { inf, err := rs.redisSvc.info() if err != nil { - return pdata.Metrics{}, err + return pmetric.Metrics{}, err } - now := pdata.NewTimestampFromTime(time.Now()) + now := pcommon.NewTimestampFromTime(time.Now()) currentUptime, err := inf.getUptimeInSeconds() if err != nil { - return pdata.Metrics{}, err + return pmetric.Metrics{}, err } if rs.uptime == time.Duration(0) || rs.uptime > currentUptime { - rs.mb.Reset(metadata.WithStartTime(pdata.NewTimestampFromTime(now.AsTime().Add(-currentUptime)))) + rs.mb.Reset(metadata.WithStartTime(pcommon.NewTimestampFromTime(now.AsTime().Add(-currentUptime)))) } rs.uptime = currentUptime @@ -91,7 +92,7 @@ func (rs *redisScraper) Scrape(context.Context) (pdata.Metrics, error) { } // recordCommonMetrics records metrics from Redis info key-value pairs. -func (rs *redisScraper) recordCommonMetrics(ts pdata.Timestamp, inf info) { +func (rs *redisScraper) recordCommonMetrics(ts pcommon.Timestamp, inf info) { recorders := rs.dataPointRecorders() for infoKey, infoVal := range inf { recorder, ok := recorders[infoKey] @@ -100,14 +101,14 @@ func (rs *redisScraper) recordCommonMetrics(ts pdata.Timestamp, inf info) { continue } switch recordDataPoint := recorder.(type) { - case func(pdata.Timestamp, int64): + case func(pcommon.Timestamp, int64): val, err := strconv.ParseInt(infoVal, 10, 64) if err != nil { rs.settings.Logger.Warn("failed to parse info int val", zap.String("key", infoKey), zap.String("val", infoVal), zap.Error(err)) } recordDataPoint(ts, val) - case func(pdata.Timestamp, float64): + case func(pcommon.Timestamp, float64): val, err := strconv.ParseFloat(infoVal, 64) if err != nil { rs.settings.Logger.Warn("failed to parse info float val", zap.String("key", infoKey), @@ -120,7 +121,7 @@ func (rs *redisScraper) recordCommonMetrics(ts pdata.Timestamp, inf info) { // recordKeyspaceMetrics records metrics from 'keyspace' Redis info key-value pairs, // e.g. "db0: keys=1,expires=2,avg_ttl=3". 
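// ---------------------------------------------------------------------------
// Illustrative sketch, not part of this diff: one way a keyspace INFO value
// such as "keys=1,expires=2,avg_ttl=3" (the part after "db0:") could be split
// before being fed to RecordRedisDbKeysDataPoint, RecordRedisDbExpiresDataPoint
// and RecordRedisDbAvgTTLDataPoint with the db name as the attribute value.
// The receiver's real parsing helper is not shown in this change, so treat
// parseKeyspace as a hypothetical stand-in rather than the actual code.
// ---------------------------------------------------------------------------
package example

import (
    "fmt"
    "strconv"
    "strings"
)

type keyspace struct {
    keys, expires, avgTTL int64
}

// parseKeyspace splits a comma-separated keyspace value into its counters.
func parseKeyspace(val string) (keyspace, error) {
    var ks keyspace
    for _, field := range strings.Split(val, ",") {
        kv := strings.SplitN(field, "=", 2)
        if len(kv) != 2 {
            return ks, fmt.Errorf("malformed keyspace field %q", field)
        }
        n, err := strconv.ParseInt(kv[1], 10, 64)
        if err != nil {
            return ks, err
        }
        switch kv[0] {
        case "keys":
            ks.keys = n
        case "expires":
            ks.expires = n
        case "avg_ttl":
            ks.avgTTL = n
        }
    }
    return ks, nil
}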
-func (rs *redisScraper) recordKeyspaceMetrics(ts pdata.Timestamp, inf info) { +func (rs *redisScraper) recordKeyspaceMetrics(ts pcommon.Timestamp, inf info) { for db := 0; db < redisMaxDbs; db++ { key := "db" + strconv.Itoa(db) str, ok := inf[key] diff --git a/receiver/riakreceiver/go.mod b/receiver/riakreceiver/go.mod index f4130f44db6e..f120a06aa91e 100644 --- a/receiver/riakreceiver/go.mod +++ b/receiver/riakreceiver/go.mod @@ -3,8 +3,7 @@ module github.com/open-telemetry/opentelemetry-collector-contrib/receiver/riakre go 1.17 require ( - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d go.uber.org/multierr v1.8.0 go.uber.org/zap v1.21.0 ) @@ -14,13 +13,14 @@ require github.com/stretchr/testify v1.7.1 require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/scrapertest v0.47.0 github.com/testcontainers/testcontainers-go v0.13.0 + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 ) require ( github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect github.com/Microsoft/go-winio v0.4.17 // indirect github.com/Microsoft/hcsshim v0.8.23 // indirect - github.com/cenkalti/backoff/v4 v4.1.2 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/containerd/cgroups v1.0.1 // indirect github.com/containerd/containerd v1.5.9 // indirect github.com/davecgh/go-spew v1.1.1 // indirect @@ -38,7 +38,7 @@ require ( github.com/golang/snappy v0.0.4 // indirect github.com/google/uuid v1.3.0 // indirect github.com/klauspost/compress v1.15.1 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/text v0.2.0 // indirect github.com/magiconair/properties v1.8.6 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect @@ -57,16 +57,16 @@ require ( github.com/rogpeppe/go-internal v1.6.1 // indirect github.com/rs/cors v1.8.2 // indirect github.com/sirupsen/logrus v1.8.1 // indirect - github.com/spf13/cast v1.4.1 // indirect github.com/stretchr/objx v0.2.0 // indirect go.opencensus.io v0.23.0 // indirect + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect - golang.org/x/net v0.0.0-20211108170745-6635138e15ea // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect + golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 // indirect golang.org/x/text v0.3.7 // indirect google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect google.golang.org/grpc v1.45.0 // indirect @@ -74,3 +74,5 @@ require ( gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/receiver/riakreceiver/go.sum b/receiver/riakreceiver/go.sum index fd9b9037e76c..ee1668e9f360 100644 --- 
a/receiver/riakreceiver/go.sum +++ b/receiver/riakreceiver/go.sum @@ -102,8 +102,9 @@ github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8n github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -404,8 +405,8 @@ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5m github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= +github.com/gorilla/mux v1.7.2 h1:zoNxOV7WjqXptQOVngLmcSQgXmgk4NMz1HibBchjl/I= github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= @@ -472,8 +473,8 @@ github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYs github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -668,8 +669,6 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasO 
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= @@ -739,10 +738,12 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0/go.mod h1:PFmBsWbldL1kiWZk9+0LBZz2brhByaGsvp6pRICMlPE= go.opentelemetry.io/otel v1.6.0/go.mod h1:bfJD2DZVw0LBxghOTlgnlI0CV3hLDu9XF/QKOUXMTQQ= @@ -752,7 +753,7 @@ go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1 go.opentelemetry.io/otel/metric v0.28.0/go.mod h1:TrzsfQAmQaB1PDcdhBauLMk7nyyg9hm+GoQq/ekE9Iw= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.0/go.mod h1:qs7BrU5cZ8dXQHBGxHMOxwME/27YH2qEp4/+tZLLwJE= go.opentelemetry.io/otel/trace v1.6.1/go.mod h1:RkFRM1m0puWIq10oxImnGEduNBzxiN7TXluRBtE+5j0= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= @@ -852,8 +853,9 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod 
h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20211108170745-6635138e15ea h1:FosBMXtOc8Tp9Hbo4ltl1WJSrTVewZU8MPnTPY2HdH8= golang.org/x/net v0.0.0-20211108170745-6635138e15ea/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -943,8 +945,8 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/receiver/riakreceiver/internal/metadata/generated_metrics_v2.go b/receiver/riakreceiver/internal/metadata/generated_metrics_v2.go index 2211d72eef58..a75043e844ea 100644 --- a/receiver/riakreceiver/internal/metadata/generated_metrics_v2.go +++ b/receiver/riakreceiver/internal/metadata/generated_metrics_v2.go @@ -5,7 +5,8 @@ package metadata import ( "time" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" ) // MetricSettings provides common settings for a particular metric. @@ -47,7 +48,7 @@ func DefaultMetricsSettings() MetricsSettings { } type metricRiakMemoryLimit struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -57,12 +58,12 @@ func (m *metricRiakMemoryLimit) init() { m.data.SetName("riak.memory.limit") m.data.SetDescription("The amount of memory allocated to the node.") m.data.SetUnit("By") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) } -func (m *metricRiakMemoryLimit) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricRiakMemoryLimit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -80,7 +81,7 @@ func (m *metricRiakMemoryLimit) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricRiakMemoryLimit) emit(metrics pdata.MetricSlice) { +func (m *metricRiakMemoryLimit) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -91,14 +92,14 @@ func (m *metricRiakMemoryLimit) emit(metrics pdata.MetricSlice) { func newMetricRiakMemoryLimit(settings MetricSettings) metricRiakMemoryLimit { m := metricRiakMemoryLimit{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricRiakNodeOperationCount struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -108,13 +109,13 @@ func (m *metricRiakNodeOperationCount) init() { m.data.SetName("riak.node.operation.count") m.data.SetDescription("The number of operations performed by the node.") m.data.SetUnit("{operation}") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricRiakNodeOperationCount) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, requestAttributeValue string) { +func (m *metricRiakNodeOperationCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, requestAttributeValue string) { if !m.settings.Enabled { return } @@ -122,7 +123,7 @@ func (m *metricRiakNodeOperationCount) recordDataPoint(start pdata.Timestamp, ts dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.Request, pdata.NewValueString(requestAttributeValue)) + dp.Attributes().Insert(A.Request, pcommon.NewValueString(requestAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -133,7 +134,7 @@ func (m *metricRiakNodeOperationCount) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricRiakNodeOperationCount) emit(metrics pdata.MetricSlice) { +func (m *metricRiakNodeOperationCount) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -144,14 +145,14 @@ func (m *metricRiakNodeOperationCount) emit(metrics pdata.MetricSlice) { func newMetricRiakNodeOperationCount(settings MetricSettings) metricRiakNodeOperationCount { m := metricRiakNodeOperationCount{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricRiakNodeOperationTimeMean struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -161,11 +162,11 @@ func (m *metricRiakNodeOperationTimeMean) init() { m.data.SetName("riak.node.operation.time.mean") m.data.SetDescription("The mean time between request and response for operations performed by the node over the last minute.") m.data.SetUnit("us") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) m.data.Gauge().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricRiakNodeOperationTimeMean) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, requestAttributeValue string) { +func (m *metricRiakNodeOperationTimeMean) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, requestAttributeValue string) { if !m.settings.Enabled { return } @@ -173,7 +174,7 @@ func (m *metricRiakNodeOperationTimeMean) recordDataPoint(start pdata.Timestamp, dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.Request, pdata.NewValueString(requestAttributeValue)) + dp.Attributes().Insert(A.Request, pcommon.NewValueString(requestAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -184,7 +185,7 @@ func (m *metricRiakNodeOperationTimeMean) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricRiakNodeOperationTimeMean) emit(metrics pdata.MetricSlice) { +func (m *metricRiakNodeOperationTimeMean) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -195,14 +196,14 @@ func (m *metricRiakNodeOperationTimeMean) emit(metrics pdata.MetricSlice) { func newMetricRiakNodeOperationTimeMean(settings MetricSettings) metricRiakNodeOperationTimeMean { m := metricRiakNodeOperationTimeMean{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricRiakNodeReadRepairCount struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -212,12 +213,12 @@ func (m *metricRiakNodeReadRepairCount) init() { m.data.SetName("riak.node.read_repair.count") m.data.SetDescription("The number of read repairs performed by the node.") m.data.SetUnit("{read_repair}") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) } -func (m *metricRiakNodeReadRepairCount) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricRiakNodeReadRepairCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -235,7 +236,7 @@ func (m *metricRiakNodeReadRepairCount) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricRiakNodeReadRepairCount) emit(metrics pdata.MetricSlice) { +func (m *metricRiakNodeReadRepairCount) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -246,14 +247,14 @@ func (m *metricRiakNodeReadRepairCount) emit(metrics pdata.MetricSlice) { func newMetricRiakNodeReadRepairCount(settings MetricSettings) metricRiakNodeReadRepairCount { m := metricRiakNodeReadRepairCount{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricRiakVnodeIndexOperationCount struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -263,13 +264,13 @@ func (m *metricRiakVnodeIndexOperationCount) init() { m.data.SetName("riak.vnode.index.operation.count") m.data.SetDescription("The number of index operations performed by vnodes on the node.") m.data.SetUnit("{operation}") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricRiakVnodeIndexOperationCount) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, operationAttributeValue string) { +func (m *metricRiakVnodeIndexOperationCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, operationAttributeValue string) { if !m.settings.Enabled { return } @@ -277,7 +278,7 @@ func (m *metricRiakVnodeIndexOperationCount) recordDataPoint(start pdata.Timesta dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.Operation, pdata.NewValueString(operationAttributeValue)) + dp.Attributes().Insert(A.Operation, pcommon.NewValueString(operationAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -288,7 +289,7 @@ func (m *metricRiakVnodeIndexOperationCount) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricRiakVnodeIndexOperationCount) emit(metrics pdata.MetricSlice) { +func (m *metricRiakVnodeIndexOperationCount) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -299,14 +300,14 @@ func (m *metricRiakVnodeIndexOperationCount) emit(metrics pdata.MetricSlice) { func newMetricRiakVnodeIndexOperationCount(settings MetricSettings) metricRiakVnodeIndexOperationCount { m := metricRiakVnodeIndexOperationCount{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricRiakVnodeOperationCount struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -316,13 +317,13 @@ func (m *metricRiakVnodeOperationCount) init() { m.data.SetName("riak.vnode.operation.count") m.data.SetDescription("The number of operations performed by vnodes on the node.") m.data.SetUnit("{operation}") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricRiakVnodeOperationCount) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, requestAttributeValue string) { +func (m *metricRiakVnodeOperationCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, requestAttributeValue string) { if !m.settings.Enabled { return } @@ -330,7 +331,7 @@ func (m *metricRiakVnodeOperationCount) recordDataPoint(start pdata.Timestamp, t dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.Request, pdata.NewValueString(requestAttributeValue)) + dp.Attributes().Insert(A.Request, pcommon.NewValueString(requestAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -341,7 +342,7 @@ func (m *metricRiakVnodeOperationCount) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricRiakVnodeOperationCount) emit(metrics pdata.MetricSlice) { +func (m *metricRiakVnodeOperationCount) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -352,7 +353,7 @@ func (m *metricRiakVnodeOperationCount) emit(metrics pdata.MetricSlice) { func newMetricRiakVnodeOperationCount(settings MetricSettings) metricRiakVnodeOperationCount { m := metricRiakVnodeOperationCount{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m @@ -361,10 +362,10 @@ func newMetricRiakVnodeOperationCount(settings MetricSettings) metricRiakVnodeOp // MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations // required to produce metric representation defined in metadata and user settings. type MetricsBuilder struct { - startTime pdata.Timestamp // start time that will be applied to all recorded data points. 
- metricsCapacity int // maximum observed number of metrics per resource. - resourceCapacity int // maximum observed number of resource attributes. - metricsBuffer pdata.Metrics // accumulates metrics data before emitting. + startTime pcommon.Timestamp // start time that will be applied to all recorded data points. + metricsCapacity int // maximum observed number of metrics per resource. + resourceCapacity int // maximum observed number of resource attributes. + metricsBuffer pmetric.Metrics // accumulates metrics data before emitting. metricRiakMemoryLimit metricRiakMemoryLimit metricRiakNodeOperationCount metricRiakNodeOperationCount metricRiakNodeOperationTimeMean metricRiakNodeOperationTimeMean @@ -377,7 +378,7 @@ type MetricsBuilder struct { type metricBuilderOption func(*MetricsBuilder) // WithStartTime sets startTime on the metrics builder. -func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { +func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption { return func(mb *MetricsBuilder) { mb.startTime = startTime } @@ -385,8 +386,8 @@ func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ - startTime: pdata.NewTimestampFromTime(time.Now()), - metricsBuffer: pdata.NewMetrics(), + startTime: pcommon.NewTimestampFromTime(time.Now()), + metricsBuffer: pmetric.NewMetrics(), metricRiakMemoryLimit: newMetricRiakMemoryLimit(settings.RiakMemoryLimit), metricRiakNodeOperationCount: newMetricRiakNodeOperationCount(settings.RiakNodeOperationCount), metricRiakNodeOperationTimeMean: newMetricRiakNodeOperationTimeMean(settings.RiakNodeOperationTimeMean), @@ -401,7 +402,7 @@ func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) } // updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity. -func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { +func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) { if mb.metricsCapacity < rm.ScopeMetrics().At(0).Metrics().Len() { mb.metricsCapacity = rm.ScopeMetrics().At(0).Metrics().Len() } @@ -411,11 +412,11 @@ func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { } // ResourceOption applies changes to provided resource. -type ResourceOption func(pdata.Resource) +type ResourceOption func(pcommon.Resource) // WithRiakNodeName sets provided value as "riak.node.name" attribute for current resource. func WithRiakNodeName(val string) ResourceOption { - return func(r pdata.Resource) { + return func(r pcommon.Resource) { r.Attributes().UpsertString("riak.node.name", val) } } @@ -425,7 +426,7 @@ func WithRiakNodeName(val string) ResourceOption { // needs to emit metrics from several resources. Otherwise calling this function is not required, // just `Emit` function can be called instead. Resource attributes should be provided as ResourceOption arguments. func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { - rm := pdata.NewResourceMetrics() + rm := pmetric.NewResourceMetrics() rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity) for _, op := range ro { op(rm.Resource()) @@ -448,47 +449,47 @@ func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { // Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for // recording another set of metrics. 
This function will be responsible for applying all the transformations required to // produce metric representation defined in metadata and user settings, e.g. delta or cumulative. -func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pdata.Metrics { +func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pmetric.Metrics { mb.EmitForResource(ro...) - metrics := pdata.NewMetrics() + metrics := pmetric.NewMetrics() mb.metricsBuffer.MoveTo(metrics) return metrics } // RecordRiakMemoryLimitDataPoint adds a data point to riak.memory.limit metric. -func (mb *MetricsBuilder) RecordRiakMemoryLimitDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordRiakMemoryLimitDataPoint(ts pcommon.Timestamp, val int64) { mb.metricRiakMemoryLimit.recordDataPoint(mb.startTime, ts, val) } // RecordRiakNodeOperationCountDataPoint adds a data point to riak.node.operation.count metric. -func (mb *MetricsBuilder) RecordRiakNodeOperationCountDataPoint(ts pdata.Timestamp, val int64, requestAttributeValue string) { +func (mb *MetricsBuilder) RecordRiakNodeOperationCountDataPoint(ts pcommon.Timestamp, val int64, requestAttributeValue string) { mb.metricRiakNodeOperationCount.recordDataPoint(mb.startTime, ts, val, requestAttributeValue) } // RecordRiakNodeOperationTimeMeanDataPoint adds a data point to riak.node.operation.time.mean metric. -func (mb *MetricsBuilder) RecordRiakNodeOperationTimeMeanDataPoint(ts pdata.Timestamp, val int64, requestAttributeValue string) { +func (mb *MetricsBuilder) RecordRiakNodeOperationTimeMeanDataPoint(ts pcommon.Timestamp, val int64, requestAttributeValue string) { mb.metricRiakNodeOperationTimeMean.recordDataPoint(mb.startTime, ts, val, requestAttributeValue) } // RecordRiakNodeReadRepairCountDataPoint adds a data point to riak.node.read_repair.count metric. -func (mb *MetricsBuilder) RecordRiakNodeReadRepairCountDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordRiakNodeReadRepairCountDataPoint(ts pcommon.Timestamp, val int64) { mb.metricRiakNodeReadRepairCount.recordDataPoint(mb.startTime, ts, val) } // RecordRiakVnodeIndexOperationCountDataPoint adds a data point to riak.vnode.index.operation.count metric. -func (mb *MetricsBuilder) RecordRiakVnodeIndexOperationCountDataPoint(ts pdata.Timestamp, val int64, operationAttributeValue string) { +func (mb *MetricsBuilder) RecordRiakVnodeIndexOperationCountDataPoint(ts pcommon.Timestamp, val int64, operationAttributeValue string) { mb.metricRiakVnodeIndexOperationCount.recordDataPoint(mb.startTime, ts, val, operationAttributeValue) } // RecordRiakVnodeOperationCountDataPoint adds a data point to riak.vnode.operation.count metric. -func (mb *MetricsBuilder) RecordRiakVnodeOperationCountDataPoint(ts pdata.Timestamp, val int64, requestAttributeValue string) { +func (mb *MetricsBuilder) RecordRiakVnodeOperationCountDataPoint(ts pcommon.Timestamp, val int64, requestAttributeValue string) { mb.metricRiakVnodeOperationCount.recordDataPoint(mb.startTime, ts, val, requestAttributeValue) } // Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted, // and metrics builder should update its startTime and reset it's internal state accordingly. 
func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { - mb.startTime = pdata.NewTimestampFromTime(time.Now()) + mb.startTime = pcommon.NewTimestampFromTime(time.Now()) for _, op := range options { op(mb) } diff --git a/receiver/riakreceiver/scraper.go b/receiver/riakreceiver/scraper.go index eac5544d14c1..2222857ddfd8 100644 --- a/receiver/riakreceiver/scraper.go +++ b/receiver/riakreceiver/scraper.go @@ -20,7 +20,8 @@ import ( "time" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/receiver/scrapererror" "go.uber.org/zap" @@ -56,24 +57,24 @@ func (r *riakScraper) start(ctx context.Context, host component.Host) (err error } // scrape collects metrics from the Riak API -func (r *riakScraper) scrape(ctx context.Context) (pdata.Metrics, error) { +func (r *riakScraper) scrape(ctx context.Context) (pmetric.Metrics, error) { // Validate we don't attempt to scrape without initializing the client if r.client == nil { - return pdata.NewMetrics(), errors.New("client not initialized") + return pmetric.NewMetrics(), errors.New("client not initialized") } // Get stats for processing stats, err := r.client.GetStats(ctx) if err != nil { - return pdata.NewMetrics(), err + return pmetric.NewMetrics(), err } return r.collectStats(stats) } // collectStats collects metrics -func (r *riakScraper) collectStats(stat *model.Stats) (pdata.Metrics, error) { - now := pdata.NewTimestampFromTime(time.Now()) +func (r *riakScraper) collectStats(stat *model.Stats) (pmetric.Metrics, error) { + now := pcommon.NewTimestampFromTime(time.Now()) var errors scrapererror.ScrapeErrors //scrape node.operation.count metric r.mb.RecordRiakNodeOperationCountDataPoint(now, stat.NodeGets, metadata.AttributeRequest.Get) diff --git a/receiver/riakreceiver/scraper_test.go b/receiver/riakreceiver/scraper_test.go index 13a0d0de7d1d..ac2d51ffa0e4 100644 --- a/receiver/riakreceiver/scraper_test.go +++ b/receiver/riakreceiver/scraper_test.go @@ -26,7 +26,7 @@ import ( "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/config/confighttp" "go.opentelemetry.io/collector/config/configtls" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/scrapertest" @@ -89,7 +89,7 @@ func TestScaperScrape(t *testing.T) { testCases := []struct { desc string setupMockClient func(t *testing.T) client - expectedMetricGen func(t *testing.T) pdata.Metrics + expectedMetricGen func(t *testing.T) pmetric.Metrics expectedErr error }{ { @@ -97,8 +97,8 @@ func TestScaperScrape(t *testing.T) { setupMockClient: func(t *testing.T) client { return nil }, - expectedMetricGen: func(t *testing.T) pdata.Metrics { - return pdata.NewMetrics() + expectedMetricGen: func(t *testing.T) pmetric.Metrics { + return pmetric.NewMetrics() }, expectedErr: errClientNotInit, }, @@ -109,8 +109,8 @@ func TestScaperScrape(t *testing.T) { mockClient.On("GetStats", mock.Anything).Return(nil, errors.New("some api error")) return &mockClient }, - expectedMetricGen: func(t *testing.T) pdata.Metrics { - return pdata.NewMetrics() + expectedMetricGen: func(t *testing.T) pmetric.Metrics { + return pmetric.NewMetrics() }, expectedErr: errors.New("some api error"), }, @@ -127,7 +127,7 @@ func TestScaperScrape(t *testing.T) { mockClient.On("GetStats", 
mock.Anything).Return(stats, nil) return &mockClient }, - expectedMetricGen: func(t *testing.T) pdata.Metrics { + expectedMetricGen: func(t *testing.T) pmetric.Metrics { goldenPath := filepath.Join("testdata", "scraper", "expected.json") expectedMetrics, err := golden.ReadMetrics(goldenPath) require.NoError(t, err) diff --git a/receiver/sapmreceiver/go.mod b/receiver/sapmreceiver/go.mod index 29f4c4966a0a..8c297469fb29 100644 --- a/receiver/sapmreceiver/go.mod +++ b/receiver/sapmreceiver/go.mod @@ -10,13 +10,14 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.48.0 github.com/signalfx/sapm-proto v0.9.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 ) require ( github.com/apache/thrift v0.16.0 // indirect - github.com/cenkalti/backoff/v4 v4.1.2 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/felixge/httpsnoop v1.0.2 // indirect github.com/go-logr/logr v1.2.3 // indirect @@ -25,7 +26,7 @@ require ( github.com/golang/protobuf v1.5.2 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/klauspost/compress v1.15.1 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect @@ -33,7 +34,6 @@ require ( github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rs/cors v1.8.2 // indirect - github.com/spf13/cast v1.4.1 // indirect github.com/uber/jaeger-client-go v2.30.0+incompatible // indirect github.com/uber/jaeger-lib v2.4.1+incompatible // indirect go.opencensus.io v0.23.0 // indirect @@ -44,10 +44,8 @@ require ( go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect go.uber.org/zap v1.21.0 // indirect - golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect golang.org/x/text v0.3.7 // indirect - google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect google.golang.org/grpc v1.45.0 // indirect google.golang.org/protobuf v1.28.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect @@ -61,3 +59,5 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/common => ../../internal/common replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal => ../../internal/coreinternal + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/receiver/sapmreceiver/go.sum b/receiver/sapmreceiver/go.sum index e1dcbc29217f..7f514c887ad6 100644 --- a/receiver/sapmreceiver/go.sum +++ b/receiver/sapmreceiver/go.sum @@ -46,7 +46,7 @@ cloud.google.com/go/storage 
v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= code.cloudfoundry.org/bytefmt v0.0.0-20190710193110-1eb035ffe2b6/go.mod h1:wN/zk7mhREp/oviagqUXY3EwuHhWyOvAdsn5Y4CzOrc= -contrib.go.opencensus.io/exporter/prometheus v0.4.0/go.mod h1:o7cosnyfuPVK0tB8q0QmaQNhGnptITnPQB+z1+qeFB0= +contrib.go.opencensus.io/exporter/prometheus v0.4.1/go.mod h1:t9wvfitlUjGXG2IXAZsuFq26mDGid/JwCEXp+gTG/9U= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= @@ -117,8 +117,8 @@ github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dR github.com/bsm/sarama-cluster v2.1.13+incompatible/go.mod h1:r7ao+4tTNXvWm+VRpRJchr2kQhqxgmAp2iEX5W96gMM= github.com/casbin/casbin/v2 v2.31.6/go.mod h1:vByNa/Fchek0KZUgG5wEsl7iFsiviAYKRtgrQfcJqHg= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= -github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= @@ -208,10 +208,12 @@ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2 github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.11.0/go.mod h1:73/6Ixaufkvb5Osvkls8C79vuQ49Ba1rUEUYNSf+FUw= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -542,8 +544,8 @@ github.com/klauspost/compress v1.14.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47e github.com/klauspost/compress v1.14.4/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= 
github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -688,6 +690,7 @@ github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5Fsn github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -699,6 +702,7 @@ github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB8 github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.28.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.33.0/go.mod h1:gB3sOl7P0TvJabZpLY5uQMpUqRCPPCyRLCZYc7JZTNE= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= @@ -732,7 +736,7 @@ github.com/sanity-io/litter v1.2.0/go.mod h1:JF6pZUFgu2Q0sBZ+HSV35P8TVPI1TTzEwyu github.com/schollz/progressbar/v2 v2.13.2/go.mod h1:6YZjqdthH6SCZKv2rqGryrxPtfmRB/DWZxSMfCXPyD8= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/shirou/gopsutil/v3 v3.22.2/go.mod h1:WapW1AOOPlHyXr+yOyw3uYx36enocrtSoSBy0L5vUHY= +github.com/shirou/gopsutil/v3 v3.22.3/go.mod h1:D01hZJ4pVHPpCTZ3m3T2+wDF2YAGfd+H4ifUguaQzHM= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/signalfx/sapm-proto v0.9.0 h1:x4EfhzOZtBGyt2x8gc/C23Id9B+3lf1zE59VUWLKbpQ= github.com/signalfx/sapm-proto 
v0.9.0/go.mod h1:OmhyyGyhBzoKQn6G2wM1vpEsGKGo0lym/kj0G41KqZk= @@ -752,7 +756,6 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= @@ -784,8 +787,8 @@ github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMT github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tklauser/go-sysconf v0.3.9/go.mod h1:11DU/5sG7UexIrp/O6g35hrWzu0JxlwQ3LSFUzyeuhs= -github.com/tklauser/numcpus v0.3.0/go.mod h1:yFGUr7TUHQRAhyqBcEg0Ge34zDBAsIvJJcyE6boqnA8= +github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk= +github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o= github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= @@ -836,13 +839,16 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= go.opentelemetry.io/collector/model v0.44.0/go.mod h1:4jo1R8uBDspLCxUGhQ0k3v/EFXFbW7s0AIy3LuGLbcU= go.opentelemetry.io/collector/model v0.45.0/go.mod h1:uyiyyq8lV45zrJ94MnLip26sorfNLP6J9XmOvaEmy7w= go.opentelemetry.io/collector/model v0.46.0/go.mod h1:uyiyyq8lV45zrJ94MnLip26sorfNLP6J9XmOvaEmy7w= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d 
h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0/go.mod h1:SY9qHHUES6W3oZnO1H2W8NvsSovIoXRg/A1AH9px8+I= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.29.0/go.mod h1:tLYsuf2v8fZreBVwp9gVMhefZlLFZaUiNVSq8QxXRII= @@ -855,16 +861,16 @@ go.opentelemetry.io/otel v1.6.0/go.mod h1:bfJD2DZVw0LBxghOTlgnlI0CV3hLDu9XF/QKOU go.opentelemetry.io/otel v1.6.1/go.mod h1:blzUabWHkX6LJewxvadmzafgh/wnvBSDBdOuwkAtrWQ= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= -go.opentelemetry.io/otel/exporters/prometheus v0.28.0/go.mod h1:nN2uGmk/rLmcbPTaZakIMqYH2Q0T8V1sOnKOHe/HLH0= +go.opentelemetry.io/otel/exporters/prometheus v0.29.0/go.mod h1:Er2VVJQZbHysogooLNchdZ3MLYoI7+d15mHmrRlRJCU= go.opentelemetry.io/otel/internal/metric v0.27.0/go.mod h1:n1CVxRqKqYZtqyTh9U/onvKapPGv7y/rpyOTI+LFNzw= go.opentelemetry.io/otel/metric v0.27.0/go.mod h1:raXDJ7uP2/Jc0nVZWQjJtzoyssOYWu/+pjZqRzfvZ7g= go.opentelemetry.io/otel/metric v0.28.0/go.mod h1:TrzsfQAmQaB1PDcdhBauLMk7nyyg9hm+GoQq/ekE9Iw= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.0/go.mod h1:PjLRUfDsoPy0zl7yrDGSUqjj43tL7rEtFdCEiGlxXRM= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= go.opentelemetry.io/otel/sdk v1.6.1/go.mod h1:IVYrddmFZ+eJqu2k38qD3WezFR2pymCzm8tdxyh3R4E= -go.opentelemetry.io/otel/sdk/metric v0.28.0/go.mod h1:DqJmT0ovBgoW6TJ8CAQyTnwxZPIp3KWtCiDDZ1uHAzU= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= +go.opentelemetry.io/otel/sdk v1.6.3/go.mod h1:A4iWF7HTXa+GWL/AaqESz28VuSBIcZ+0CV+IzJ5NMiQ= +go.opentelemetry.io/otel/sdk/metric v0.29.0/go.mod h1:IFkFNKI8Gq8zBdqOKdODCL9+LInBZLXaGpqSIKphNuU= go.opentelemetry.io/otel/trace v1.4.0/go.mod h1:uc3eRsqDfWs9R7b92xbQbU42/eTNz4N+gLP8qJCi4aE= go.opentelemetry.io/otel/trace v1.4.1/go.mod h1:iYEVbroFCNut9QkwEczV9vMRPHNKSSwYZjulEtsmhFc= go.opentelemetry.io/otel/trace v1.6.0/go.mod h1:qs7BrU5cZ8dXQHBGxHMOxwME/27YH2qEp4/+tZLLwJE= @@ -1006,8 +1012,9 @@ golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220105145211-5b0dc2dfae98/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 
v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1025,6 +1032,7 @@ golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1119,7 +1127,6 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1129,9 +1136,9 @@ golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220111092808-5a964db01320/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= diff --git a/receiver/sapmreceiver/trace_receiver_test.go b/receiver/sapmreceiver/trace_receiver_test.go index 2eb424707baf..e2149ddd8ba3 100644 --- a/receiver/sapmreceiver/trace_receiver_test.go +++ b/receiver/sapmreceiver/trace_receiver_test.go @@ -34,20 +34,21 @@ import ( "go.opentelemetry.io/collector/config/confighttp" 
"go.opentelemetry.io/collector/config/configtls" "go.opentelemetry.io/collector/consumer/consumertest" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/testutil" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk" ) -func expectedTraceData(t1, t2, t3 time.Time) pdata.Traces { - traceID := pdata.NewTraceID( +func expectedTraceData(t1, t2, t3 time.Time) ptrace.Traces { + traceID := pcommon.NewTraceID( [16]byte{0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF, 0x80}) - parentSpanID := pdata.NewSpanID([8]byte{0x1F, 0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18}) - childSpanID := pdata.NewSpanID([8]byte{0xAF, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9, 0xA8}) + parentSpanID := pcommon.NewSpanID([8]byte{0x1F, 0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18}) + childSpanID := pcommon.NewSpanID([8]byte{0xAF, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9, 0xA8}) - traces := pdata.NewTraces() + traces := ptrace.NewTraces() rs := traces.ResourceSpans().AppendEmpty() rs.Resource().Attributes().InsertString(conventions.AttributeServiceName, "issaTest") rs.Resource().Attributes().InsertBool("bool", true) @@ -60,18 +61,18 @@ func expectedTraceData(t1, t2, t3 time.Time) pdata.Traces { span0.SetParentSpanID(parentSpanID) span0.SetTraceID(traceID) span0.SetName("DBSearch") - span0.SetStartTimestamp(pdata.NewTimestampFromTime(t1)) - span0.SetEndTimestamp(pdata.NewTimestampFromTime(t2)) - span0.Status().SetCode(pdata.StatusCodeError) + span0.SetStartTimestamp(pcommon.NewTimestampFromTime(t1)) + span0.SetEndTimestamp(pcommon.NewTimestampFromTime(t2)) + span0.Status().SetCode(ptrace.StatusCodeError) span0.Status().SetMessage("Stale indices") span1 := spans.AppendEmpty() span1.SetSpanID(parentSpanID) span1.SetTraceID(traceID) span1.SetName("ProxyFetch") - span1.SetStartTimestamp(pdata.NewTimestampFromTime(t2)) - span1.SetEndTimestamp(pdata.NewTimestampFromTime(t3)) - span1.Status().SetCode(pdata.StatusCodeError) + span1.SetStartTimestamp(pcommon.NewTimestampFromTime(t2)) + span1.SetEndTimestamp(pcommon.NewTimestampFromTime(t3)) + span1.Status().SetCode(ptrace.StatusCodeError) span1.Status().SetMessage("Frontend crash") return traces @@ -234,7 +235,7 @@ func TestReception(t *testing.T) { tests := []struct { name string args args - want pdata.Traces + want ptrace.Traces }{ { name: "receive uncompressed sapm", diff --git a/receiver/signalfxreceiver/go.mod b/receiver/signalfxreceiver/go.mod index 906979bc4057..c793802ed617 100644 --- a/receiver/signalfxreceiver/go.mod +++ b/receiver/signalfxreceiver/go.mod @@ -11,17 +11,18 @@ require ( github.com/signalfx/com_signalfx_metrics_protobuf v0.0.3 github.com/stretchr/testify v1.7.1 go.opencensus.io v0.23.0 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/zap v1.21.0 ) require ( - github.com/cenkalti/backoff/v4 v4.1.2 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/felixge/httpsnoop v1.0.2 // indirect - 
github.com/go-logfmt/logfmt v0.5.0 // indirect + github.com/go-logfmt/logfmt v0.5.1 // indirect github.com/go-logr/logr v1.2.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.2.6 // indirect @@ -34,7 +35,7 @@ require ( github.com/jaegertracing/jaeger v1.32.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/klauspost/compress v1.15.1 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect @@ -52,7 +53,6 @@ require ( github.com/signalfx/golib/v3 v3.3.13 // indirect github.com/signalfx/sapm-proto v0.4.0 // indirect github.com/signalfx/signalfx-agent/pkg/apm v0.0.0-20201202163743-65b4fa925fc8 // indirect - github.com/spf13/cast v1.4.1 // indirect github.com/tklauser/go-sysconf v0.3.10 // indirect github.com/tklauser/numcpus v0.4.0 // indirect github.com/uber/jaeger-client-go v2.30.0+incompatible // indirect @@ -64,10 +64,9 @@ require ( go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect - golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 // indirect golang.org/x/text v0.3.7 // indirect - google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect google.golang.org/grpc v1.45.0 // indirect google.golang.org/protobuf v1.28.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect @@ -87,3 +86,5 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperre replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/signalfx => ../../pkg/translator/signalfx replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal => ../../internal/coreinternal + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/receiver/signalfxreceiver/go.sum b/receiver/signalfxreceiver/go.sum index f1ae05f11d24..78fce01c2f88 100644 --- a/receiver/signalfxreceiver/go.sum +++ b/receiver/signalfxreceiver/go.sum @@ -2,7 +2,6 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/HdrHistogram/hdrhistogram-go v1.0.1 h1:GX8GAYDuhlFQnI2fRDHQhTlkHMz8bEn0jTI6LJU0mpw= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= @@ -20,16 +19,14 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod 
h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= -github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= @@ -42,7 +39,6 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A= @@ -60,8 +56,8 @@ github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2 github.com/go-kit/kit v0.11.0 h1:IGmIEl7aHTYh6E2HlT+ptILBotjo4xl8PMDl852etiI= github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logfmt/logfmt v0.5.1 
h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= +github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -167,8 +163,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= @@ -246,9 +242,6 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykE github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4-0.20190306220146-200a235640ff/go.mod h1:KSQcGKpxUMHk3nbYzs/tIBAM2iDooCn0BmttHOJEbLs= github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -279,10 +272,12 @@ go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model 
v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0/go.mod h1:PFmBsWbldL1kiWZk9+0LBZz2brhByaGsvp6pRICMlPE= @@ -293,7 +288,7 @@ go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1 go.opentelemetry.io/otel/metric v0.28.0/go.mod h1:TrzsfQAmQaB1PDcdhBauLMk7nyyg9hm+GoQq/ekE9Iw= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.0/go.mod h1:qs7BrU5cZ8dXQHBGxHMOxwME/27YH2qEp4/+tZLLwJE= go.opentelemetry.io/otel/trace v1.6.1/go.mod h1:RkFRM1m0puWIq10oxImnGEduNBzxiN7TXluRBtE+5j0= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= @@ -335,8 +330,8 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -371,7 +366,6 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -401,7 
+395,6 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -412,7 +405,6 @@ google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8 google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -426,7 +418,6 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= diff --git a/receiver/signalfxreceiver/receiver.go b/receiver/signalfxreceiver/receiver.go index 85e81fa4821e..d6444f957c76 100644 --- a/receiver/signalfxreceiver/receiver.go +++ b/receiver/signalfxreceiver/receiver.go @@ -32,9 +32,10 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/component/componenterror" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" "go.opentelemetry.io/collector/obsreport" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk" @@ -246,7 +247,7 @@ func (r *sfxReceiver) handleDatapointReq(resp http.ResponseWriter, req *http.Req for i := 0; i < md.ResourceMetrics().Len(); i++ { rm := md.ResourceMetrics().At(i) res := rm.Resource() - res.Attributes().Insert(splunk.SFxAccessTokenLabel, pdata.NewValueString(accessToken)) + res.Attributes().Insert(splunk.SFxAccessTokenLabel, pcommon.NewValueString(accessToken)) } } } @@ -282,7 +283,7 @@ func (r *sfxReceiver) handleEventReq(resp http.ResponseWriter, req *http.Request return } - ld := pdata.NewLogs() + ld 
:= plog.NewLogs() rl := ld.ResourceLogs().AppendEmpty() sl := rl.ScopeLogs().AppendEmpty() signalFxV2EventsToLogRecords(msg.Events, sl.LogRecords()) diff --git a/receiver/signalfxreceiver/receiver_test.go b/receiver/signalfxreceiver/receiver_test.go index e6e1026f8a7e..b71d1878eb45 100644 --- a/receiver/signalfxreceiver/receiver_test.go +++ b/receiver/signalfxreceiver/receiver_test.go @@ -42,7 +42,8 @@ import ( "go.opentelemetry.io/collector/config/configtls" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumertest" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/signalfxexporter" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/testutil" @@ -112,18 +113,18 @@ func Test_signalfxeceiver_EndToEnd(t *testing.T) { unixSecs := int64(1574092046) unixNSecs := int64(11 * time.Millisecond) - ts := pdata.NewTimestampFromTime(time.Unix(unixSecs, unixNSecs)) + ts := pcommon.NewTimestampFromTime(time.Unix(unixSecs, unixNSecs)) const doubleVal = 1234.5678 const int64Val = int64(123) - want := pdata.NewMetrics() + want := pmetric.NewMetrics() ilm := want.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty() { m := ilm.Metrics().AppendEmpty() m.SetName("gauge_double_with_dims") - m.SetDataType(pdata.MetricDataTypeGauge) + m.SetDataType(pmetric.MetricDataTypeGauge) doublePt := m.Gauge().DataPoints().AppendEmpty() doublePt.SetTimestamp(ts) doublePt.SetDoubleVal(doubleVal) @@ -131,7 +132,7 @@ func Test_signalfxeceiver_EndToEnd(t *testing.T) { { m := ilm.Metrics().AppendEmpty() m.SetName("gauge_int_with_dims") - m.SetDataType(pdata.MetricDataTypeGauge) + m.SetDataType(pmetric.MetricDataTypeGauge) int64Pt := m.Gauge().DataPoints().AppendEmpty() int64Pt.SetTimestamp(ts) int64Pt.SetIntVal(int64Val) @@ -139,8 +140,8 @@ func Test_signalfxeceiver_EndToEnd(t *testing.T) { { m := ilm.Metrics().AppendEmpty() m.SetName("cumulative_double_with_dims") - m.SetDataType(pdata.MetricDataTypeSum) - m.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.SetDataType(pmetric.MetricDataTypeSum) + m.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.Sum().SetIsMonotonic(true) doublePt := m.Sum().DataPoints().AppendEmpty() doublePt.SetTimestamp(ts) @@ -149,8 +150,8 @@ func Test_signalfxeceiver_EndToEnd(t *testing.T) { { m := ilm.Metrics().AppendEmpty() m.SetName("cumulative_int_with_dims") - m.SetDataType(pdata.MetricDataTypeSum) - m.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.SetDataType(pmetric.MetricDataTypeSum) + m.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.Sum().SetIsMonotonic(true) int64Pt := m.Sum().DataPoints().AppendEmpty() int64Pt.SetTimestamp(ts) @@ -561,14 +562,14 @@ func Test_sfxReceiver_TLS(t *testing.T) { msec := time.Now().Unix() * 1e3 - want := pdata.NewMetrics() + want := pmetric.NewMetrics() m := want.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics().AppendEmpty() - m.SetDataType(pdata.MetricDataTypeGauge) + m.SetDataType(pmetric.MetricDataTypeGauge) m.SetName("single") dps := m.Gauge().DataPoints() dp := dps.AppendEmpty() - dp.SetTimestamp(pdata.Timestamp(msec * 1e6)) + dp.SetTimestamp(pcommon.Timestamp(msec * 1e6)) dp.SetIntVal(13) dp.Attributes().InsertString("k0", "v0") diff --git 
a/receiver/signalfxreceiver/signalfxv2_event_to_logdata.go b/receiver/signalfxreceiver/signalfxv2_event_to_logdata.go index 92c56ca7263c..e8522ccfcacf 100644 --- a/receiver/signalfxreceiver/signalfxv2_event_to_logdata.go +++ b/receiver/signalfxreceiver/signalfxv2_event_to_logdata.go @@ -16,15 +16,16 @@ package signalfxreceiver // import "github.com/open-telemetry/opentelemetry-coll import ( sfxpb "github.com/signalfx/com_signalfx_metrics_protobuf/model" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk" ) // signalFxV2ToMetricsData converts SignalFx event proto data points to -// pdata.LogRecordSlice. Returning the converted data and the number of dropped log +// plog.LogRecordSlice. Returning the converted data and the number of dropped log // records. -func signalFxV2EventsToLogRecords(events []*sfxpb.Event, lrs pdata.LogRecordSlice) { +func signalFxV2EventsToLogRecords(events []*sfxpb.Event, lrs plog.LogRecordSlice) { lrs.EnsureCapacity(len(events)) for _, event := range events { @@ -41,7 +42,7 @@ func signalFxV2EventsToLogRecords(events []*sfxpb.Event, lrs pdata.LogRecordSlic // SignalFx timestamps are in millis so convert to nanos by multiplying // by 1 million. - lr.SetTimestamp(pdata.Timestamp(event.Timestamp * 1e6)) + lr.SetTimestamp(pcommon.Timestamp(event.Timestamp * 1e6)) if event.Category != nil { attrs.InsertInt(splunk.SFxEventCategoryKey, int64(*event.Category)) @@ -57,7 +58,7 @@ func signalFxV2EventsToLogRecords(events []*sfxpb.Event, lrs pdata.LogRecordSlic } if len(event.Properties) > 0 { - propMapVal := pdata.NewValueMap() + propMapVal := pcommon.NewValueMap() propMap := propMapVal.MapVal() propMap.EnsureCapacity(len(event.Properties)) diff --git a/receiver/signalfxreceiver/signalfxv2_event_to_logdata_test.go b/receiver/signalfxreceiver/signalfxv2_event_to_logdata_test.go index ce91bd9540b8..959d477b2598 100644 --- a/receiver/signalfxreceiver/signalfxv2_event_to_logdata_test.go +++ b/receiver/signalfxreceiver/signalfxv2_event_to_logdata_test.go @@ -22,7 +22,8 @@ import ( sfxpb "github.com/signalfx/com_signalfx_metrics_protobuf/model" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" ) func TestSignalFxV2EventsToLogData(t *testing.T) { @@ -48,17 +49,17 @@ func TestSignalFxV2EventsToLogData(t *testing.T) { } } - buildDefaultLogs := func() pdata.LogRecordSlice { - logSlice := pdata.NewLogRecordSlice() + buildDefaultLogs := func() plog.LogRecordSlice { + logSlice := plog.NewLogRecordSlice() l := logSlice.AppendEmpty() - l.SetTimestamp(pdata.NewTimestampFromTime(now.Truncate(time.Millisecond))) + l.SetTimestamp(pcommon.NewTimestampFromTime(now.Truncate(time.Millisecond))) attrs := l.Attributes() attrs.InsertString("com.splunk.signalfx.event_type", "shutdown") attrs.InsertString("k0", "v0") attrs.InsertString("k1", "v1") attrs.InsertString("k2", "v2") - propMapVal := pdata.NewValueMap() + propMapVal := pcommon.NewValueMap() propMap := propMapVal.MapVal() propMap.InsertString("env", "prod") propMap.InsertBool("isActive", true) @@ -67,7 +68,7 @@ func TestSignalFxV2EventsToLogData(t *testing.T) { propMap.InsertNull("nullProp") propMap.Sort() attrs.Insert("com.splunk.signalfx.event_properties", propMapVal) - attrs.Insert("com.splunk.signalfx.event_category", 
pdata.NewValueInt(int64(sfxpb.EventCategory_USER_DEFINED))) + attrs.Insert("com.splunk.signalfx.event_category", pcommon.NewValueInt(int64(sfxpb.EventCategory_USER_DEFINED))) l.Attributes().Sort() return logSlice @@ -76,7 +77,7 @@ func TestSignalFxV2EventsToLogData(t *testing.T) { tests := []struct { name string sfxEvents []*sfxpb.Event - expected pdata.LogRecordSlice + expected plog.LogRecordSlice }{ { name: "default", @@ -90,9 +91,9 @@ func TestSignalFxV2EventsToLogData(t *testing.T) { e.Category = nil return []*sfxpb.Event{e} }(), - expected: func() pdata.LogRecordSlice { + expected: func() plog.LogRecordSlice { lrs := buildDefaultLogs() - lrs.At(0).Attributes().Upsert("com.splunk.signalfx.event_category", pdata.NewValueEmpty()) + lrs.At(0).Attributes().Upsert("com.splunk.signalfx.event_category", pcommon.NewValueEmpty()) return lrs }(), }, @@ -100,7 +101,7 @@ func TestSignalFxV2EventsToLogData(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - lrs := pdata.NewLogRecordSlice() + lrs := plog.NewLogRecordSlice() signalFxV2EventsToLogRecords(tt.sfxEvents, lrs) for i := 0; i < lrs.Len(); i++ { lrs.At(i).Attributes().Sort() diff --git a/receiver/simpleprometheusreceiver/examples/federation/prom-counter/go.mod b/receiver/simpleprometheusreceiver/examples/federation/prom-counter/go.mod index e9dabb689ba2..e4eda86c5fde 100644 --- a/receiver/simpleprometheusreceiver/examples/federation/prom-counter/go.mod +++ b/receiver/simpleprometheusreceiver/examples/federation/prom-counter/go.mod @@ -28,3 +28,5 @@ require ( golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect google.golang.org/protobuf v1.28.0 // indirect ) + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/receiver/simpleprometheusreceiver/go.mod b/receiver/simpleprometheusreceiver/go.mod index 6ce33a2bffac..7b44c5225fab 100644 --- a/receiver/simpleprometheusreceiver/go.mod +++ b/receiver/simpleprometheusreceiver/go.mod @@ -7,7 +7,7 @@ require ( github.com/prometheus/common v0.33.0 github.com/prometheus/prometheus v1.8.2-0.20220324155304-4d8bbfd4164c github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d k8s.io/client-go v0.23.5 ) @@ -70,7 +70,7 @@ require ( github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b // indirect github.com/kr/pretty v0.3.0 // indirect github.com/linode/linodego v1.3.0 // indirect @@ -96,10 +96,10 @@ require ( github.com/prometheus/procfs v0.7.3 // indirect github.com/scaleway/scaleway-sdk-go v1.0.0-beta.9 // indirect github.com/sirupsen/logrus v1.8.1 // indirect - github.com/spf13/cast v1.4.1 // indirect github.com/spf13/pflag v1.0.5 // indirect go.opencensus.io v0.23.0 // indirect - go.opentelemetry.io/collector/model v0.48.0 // indirect + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d // indirect + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // 
indirect @@ -146,3 +146,5 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite => ../../pkg/translator/prometheusremotewrite replace github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver => ../../receiver/prometheusreceiver + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/receiver/simpleprometheusreceiver/go.sum b/receiver/simpleprometheusreceiver/go.sum index 0af20b5648d2..3bbca621bb05 100644 --- a/receiver/simpleprometheusreceiver/go.sum +++ b/receiver/simpleprometheusreceiver/go.sum @@ -51,7 +51,7 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -contrib.go.opencensus.io/exporter/prometheus v0.4.0 h1:0QfIkj9z/iVZgK31D9H9ohjjIDApI2GOPScCKwxedbs= +contrib.go.opencensus.io/exporter/prometheus v0.4.1 h1:oObVeKo2NxpdF/fIfrPsNj6K0Prg0R0mHM+uANlYMiM= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/AdaLogics/go-fuzz-headers v0.0.0-20210715213245-6c3934b029d8/go.mod h1:CzsSbkDixRphAF5hS6wbMKq0eI6ccJRb7/A0M6JBnwg= github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= @@ -187,8 +187,8 @@ github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8n github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= @@ -848,8 +848,8 @@ github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYs github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf 
v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b h1:iNjcivnc6lhbvJA3LD622NPrUponluJrBWPIwGG/3Bg= github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -1169,8 +1169,6 @@ github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTd github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= @@ -1289,10 +1287,12 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib v0.20.0 h1:ubFQUn0VCZ0gPwIoJfBJVpeBlyRMxu8Mm/huKWYd9p0= go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E= diff --git a/receiver/skywalkingreceiver/go.mod b/receiver/skywalkingreceiver/go.mod index 8f842baca39b..5e3048c10826 100644 --- a/receiver/skywalkingreceiver/go.mod +++ b/receiver/skywalkingreceiver/go.mod @@ -5,8 +5,9 @@ go 1.17 require ( github.com/gorilla/mux v1.8.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + 
go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/multierr v1.8.0 google.golang.org/grpc v1.45.0 google.golang.org/protobuf v1.28.0 @@ -24,7 +25,7 @@ require ( github.com/golang/protobuf v1.5.2 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/klauspost/compress v1.15.1 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect @@ -33,7 +34,6 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rs/cors v1.8.2 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 // indirect @@ -42,7 +42,7 @@ require ( go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/zap v1.21.0 // indirect - golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect golang.org/x/sys v0.0.0-20220209214540-3681064d5158 // indirect golang.org/x/text v0.3.7 // indirect @@ -51,3 +51,5 @@ require ( gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/receiver/skywalkingreceiver/go.sum b/receiver/skywalkingreceiver/go.sum index 021b4995be21..b2e9f484709d 100644 --- a/receiver/skywalkingreceiver/go.sum +++ b/receiver/skywalkingreceiver/go.sum @@ -65,7 +65,7 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -232,8 +232,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/klauspost/compress v1.14.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod 
h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= @@ -291,8 +291,6 @@ github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFo github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/schollz/progressbar/v2 v2.13.2/go.mod h1:6YZjqdthH6SCZKv2rqGryrxPtfmRB/DWZxSMfCXPyD8= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -317,10 +315,12 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0/go.mod h1:SY9qHHUES6W3oZnO1H2W8NvsSovIoXRg/A1AH9px8+I= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= @@ -332,7 +332,7 @@ go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1 go.opentelemetry.io/otel/metric v0.28.0/go.mod h1:TrzsfQAmQaB1PDcdhBauLMk7nyyg9hm+GoQq/ekE9Iw= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric 
v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.0/go.mod h1:qs7BrU5cZ8dXQHBGxHMOxwME/27YH2qEp4/+tZLLwJE= go.opentelemetry.io/otel/trace v1.6.1/go.mod h1:RkFRM1m0puWIq10oxImnGEduNBzxiN7TXluRBtE+5j0= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= @@ -424,8 +424,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= diff --git a/receiver/skywalkingreceiver/skywalkingproto_to_traces.go b/receiver/skywalkingreceiver/skywalkingproto_to_traces.go index 6f335df04ba1..71109446ba97 100644 --- a/receiver/skywalkingreceiver/skywalkingproto_to_traces.go +++ b/receiver/skywalkingreceiver/skywalkingproto_to_traces.go @@ -20,8 +20,9 @@ import ( "time" "unsafe" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.8.0" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" common "skywalking.apache.org/repo/goapi/collect/common/v3" agentV3 "skywalking.apache.org/repo/goapi/collect/language/agent/v3" ) @@ -42,8 +43,8 @@ var otSpanTagsMapping = map[string]string{ "mq.broker": conventions.AttributeNetPeerName, } -func SkywalkingToTraces(segment *agentV3.SegmentObject) pdata.Traces { - traceData := pdata.NewTraces() +func SkywalkingToTraces(segment *agentV3.SegmentObject) ptrace.Traces { + traceData := ptrace.NewTraces() swSpans := segment.Spans if swSpans == nil && len(swSpans) == 0 { @@ -54,8 +55,8 @@ func SkywalkingToTraces(segment *agentV3.SegmentObject) pdata.Traces { rs := resourceSpan.Resource() for _, span := range swSpans { swTagsToInternalResource(span, rs) - rs.Attributes().Insert(conventions.AttributeServiceName, pdata.NewValueString(segment.GetService())) - rs.Attributes().Insert(conventions.AttributeServiceInstanceID, pdata.NewValueString(segment.GetServiceInstance())) + rs.Attributes().Insert(conventions.AttributeServiceName, pcommon.NewValueString(segment.GetService())) + rs.Attributes().Insert(conventions.AttributeServiceInstanceID, pcommon.NewValueString(segment.GetServiceInstance())) } il := resourceSpan.ScopeSpans().AppendEmpty() @@ -64,7 +65,7 @@ func SkywalkingToTraces(segment *agentV3.SegmentObject) pdata.Traces { return traceData } -func 
swTagsToInternalResource(span *agentV3.SpanObject, dest pdata.Resource) { +func swTagsToInternalResource(span *agentV3.SpanObject, dest pcommon.Resource) { if span == nil { return } @@ -85,7 +86,7 @@ func swTagsToInternalResource(span *agentV3.SpanObject, dest pdata.Resource) { } } -func swSpansToSpanSlice(traceID string, spans []*agentV3.SpanObject, dest pdata.SpanSlice) { +func swSpansToSpanSlice(traceID string, spans []*agentV3.SpanObject, dest ptrace.SpanSlice) { if len(spans) == 0 { return } @@ -99,7 +100,7 @@ func swSpansToSpanSlice(traceID string, spans []*agentV3.SpanObject, dest pdata. } } -func swSpanToSpan(traceID string, span *agentV3.SpanObject, dest pdata.Span) { +func swSpanToSpan(traceID string, span *agentV3.SpanObject, dest ptrace.Span) { dest.SetTraceID(stringToTraceID(traceID)) dest.SetSpanID(uInt32ToSpanID(uint32(span.GetSpanId()))) @@ -125,18 +126,18 @@ func swSpanToSpan(traceID string, span *agentV3.SpanObject, dest pdata.Span) { switch { case span.SpanLayer == agentV3.SpanLayer_MQ: if span.SpanType == agentV3.SpanType_Entry { - dest.SetKind(pdata.SpanKindConsumer) + dest.SetKind(ptrace.SpanKindConsumer) } else if span.SpanType == agentV3.SpanType_Exit { - dest.SetKind(pdata.SpanKindProducer) + dest.SetKind(ptrace.SpanKindProducer) } case span.GetSpanType() == agentV3.SpanType_Exit: - dest.SetKind(pdata.SpanKindClient) + dest.SetKind(ptrace.SpanKindClient) case span.GetSpanType() == agentV3.SpanType_Entry: - dest.SetKind(pdata.SpanKindServer) + dest.SetKind(ptrace.SpanKindServer) case span.GetSpanType() == agentV3.SpanType_Local: - dest.SetKind(pdata.SpanKindInternal) + dest.SetKind(ptrace.SpanKindInternal) default: - dest.SetKind(pdata.SpanKindUnspecified) + dest.SetKind(ptrace.SpanKindUnspecified) } swLogsToSpanEvents(span.GetLogs(), dest.Events()) @@ -144,7 +145,7 @@ func swSpanToSpan(traceID string, span *agentV3.SpanObject, dest pdata.Span) { swReferencesToSpanLinks(span.Refs, dest.Links()) } -func swReferencesToSpanLinks(refs []*agentV3.SegmentReference, dest pdata.SpanLinkSlice) { +func swReferencesToSpanLinks(refs []*agentV3.SegmentReference, dest ptrace.SpanLinkSlice) { if len(refs) == 0 { return } @@ -182,24 +183,24 @@ func swReferencesToSpanLinks(refs []*agentV3.SegmentReference, dest pdata.SpanLi } } -func setInternalSpanStatus(span *agentV3.SpanObject, dest pdata.SpanStatus) { +func setInternalSpanStatus(span *agentV3.SpanObject, dest ptrace.SpanStatus) { if span.GetIsError() { - dest.SetCode(pdata.StatusCodeError) + dest.SetCode(ptrace.StatusCodeError) dest.SetMessage("ERROR") } else { - dest.SetCode(pdata.StatusCodeOk) + dest.SetCode(ptrace.StatusCodeOk) dest.SetMessage("SUCCESS") } } -func swLogsToSpanEvents(logs []*agentV3.Log, dest pdata.SpanEventSlice) { +func swLogsToSpanEvents(logs []*agentV3.Log, dest ptrace.SpanEventSlice) { if len(logs) == 0 { return } dest.EnsureCapacity(len(logs)) for i, log := range logs { - var event pdata.SpanEvent + var event ptrace.SpanEvent if dest.Len() > i { event = dest.At(i) } else { @@ -219,7 +220,7 @@ func swLogsToSpanEvents(logs []*agentV3.Log, dest pdata.SpanEventSlice) { } } -func swKvPairsToInternalAttributes(pairs []*common.KeyStringValuePair, dest pdata.Map) { +func swKvPairsToInternalAttributes(pairs []*common.KeyStringValuePair, dest pcommon.Map) { if pairs == nil { return } @@ -229,24 +230,24 @@ func swKvPairsToInternalAttributes(pairs []*common.KeyStringValuePair, dest pdat } } -// microsecondsToTimestamp converts epoch microseconds to pdata.Timestamp -func microsecondsToTimestamp(ms int64) 
pdata.Timestamp { - return pdata.NewTimestampFromTime(time.UnixMilli(ms)) +// microsecondsToTimestamp converts epoch microseconds to pcommon.Timestamp +func microsecondsToTimestamp(ms int64) pcommon.Timestamp { + return pcommon.NewTimestampFromTime(time.UnixMilli(ms)) } -func stringToTraceID(traceID string) pdata.TraceID { - return pdata.NewTraceID(unsafeStringToBytes(traceID)) +func stringToTraceID(traceID string) pcommon.TraceID { + return pcommon.NewTraceID(unsafeStringToBytes(traceID)) } -func stringToParentSpanID(traceID string) pdata.SpanID { - return pdata.NewSpanID(unsafeStringTo8Bytes(traceID)) +func stringToParentSpanID(traceID string) pcommon.SpanID { + return pcommon.NewSpanID(unsafeStringTo8Bytes(traceID)) } -// uInt32ToSpanID converts the uint64 representation of a SpanID to pdata.SpanID. -func uInt32ToSpanID(id uint32) pdata.SpanID { +// uInt32ToSpanID converts the uint64 representation of a SpanID to pcommon.SpanID. +func uInt32ToSpanID(id uint32) pcommon.SpanID { spanID := [8]byte{} binary.BigEndian.PutUint32(spanID[:], id) - return pdata.NewSpanID(spanID) + return pcommon.NewSpanID(spanID) } func unsafeStringToBytes(s string) [16]byte { diff --git a/receiver/skywalkingreceiver/skywalkingproto_to_traces_test.go b/receiver/skywalkingreceiver/skywalkingproto_to_traces_test.go index 51e4d4af2f49..f96b5d0f5cfa 100644 --- a/receiver/skywalkingreceiver/skywalkingproto_to_traces_test.go +++ b/receiver/skywalkingreceiver/skywalkingproto_to_traces_test.go @@ -19,7 +19,7 @@ import ( "testing" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/ptrace" agentV3 "skywalking.apache.org/repo/goapi/collect/language/agent/v3" ) @@ -27,8 +27,8 @@ func TestSetInternalSpanStatus(t *testing.T) { tests := []struct { name string swSpan *agentV3.SpanObject - dest pdata.SpanStatus - code pdata.StatusCode + dest ptrace.SpanStatus + code ptrace.StatusCode }{ { name: "StatusCodeError", @@ -36,7 +36,7 @@ func TestSetInternalSpanStatus(t *testing.T) { IsError: true, }, dest: generateTracesOneEmptyResourceSpans().Status(), - code: pdata.StatusCodeError, + code: ptrace.StatusCodeError, }, { name: "StatusCodeOk", @@ -44,7 +44,7 @@ func TestSetInternalSpanStatus(t *testing.T) { IsError: false, }, dest: generateTracesOneEmptyResourceSpans().Status(), - code: pdata.StatusCodeOk, + code: ptrace.StatusCodeOk, }, } @@ -60,7 +60,7 @@ func TestSwKvPairsToInternalAttributes(t *testing.T) { tests := []struct { name string swSpan *agentV3.SegmentObject - dest pdata.Span + dest ptrace.Span }{ { name: "mock-sw-swgment-1", @@ -89,8 +89,8 @@ func TestSwProtoToTraces(t *testing.T) { tests := []struct { name string swSpan *agentV3.SegmentObject - dest pdata.Traces - code pdata.StatusCode + dest ptrace.Traces + code ptrace.StatusCode }{ { name: "mock-sw-swgment-1", @@ -110,7 +110,7 @@ func TestSwReferencesToSpanLinks(t *testing.T) { tests := []struct { name string swSpan *agentV3.SegmentObject - dest pdata.Span + dest ptrace.Span }{ { name: "mock-sw-swgment-1", @@ -136,7 +136,7 @@ func TestSwLogsToSpanEvents(t *testing.T) { tests := []struct { name string swSpan *agentV3.SegmentObject - dest pdata.Span + dest ptrace.Span }{ { name: "mock-sw-swgment-0", @@ -160,8 +160,8 @@ func TestSwLogsToSpanEvents(t *testing.T) { }) } } -func generateTracesOneEmptyResourceSpans() pdata.Span { - td := pdata.NewTraces() +func generateTracesOneEmptyResourceSpans() ptrace.Span { + td := ptrace.NewTraces() resourceSpan := td.ResourceSpans().AppendEmpty() il := 
resourceSpan.ScopeSpans().AppendEmpty() il.Spans().AppendEmpty() diff --git a/receiver/splunkhecreceiver/go.mod b/receiver/splunkhecreceiver/go.mod index ca86c78b586f..d3544e69244d 100644 --- a/receiver/splunkhecreceiver/go.mod +++ b/receiver/splunkhecreceiver/go.mod @@ -9,13 +9,14 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.48.0 github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk v0.48.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/zap v1.21.0 ) require ( - github.com/cenkalti/backoff/v4 v4.1.2 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/felixge/httpsnoop v1.0.2 // indirect github.com/go-logr/logr v1.2.3 // indirect @@ -24,7 +25,7 @@ require ( github.com/golang/protobuf v1.5.2 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/klauspost/compress v1.15.1 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect @@ -34,7 +35,6 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rogpeppe/go-internal v1.6.1 // indirect github.com/rs/cors v1.8.2 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect @@ -42,10 +42,8 @@ require ( go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect - golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect golang.org/x/text v0.3.7 // indirect - google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect google.golang.org/grpc v1.45.0 // indirect google.golang.org/protobuf v1.28.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect @@ -63,3 +61,5 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/commo replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal => ../../internal/coreinternal replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperresourceattr => ../../pkg/batchperresourceattr + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/receiver/splunkhecreceiver/go.sum b/receiver/splunkhecreceiver/go.sum index d0a773d9e2f5..edda35b3cc7f 100644 --- a/receiver/splunkhecreceiver/go.sum +++ b/receiver/splunkhecreceiver/go.sum @@ -1,7 +1,6 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod 
h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= @@ -18,17 +17,15 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= -github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.3.0 h1:t/LhUZLVitR1Ow2YOnduCsavhwFUklBMoGVYUCqmCqk= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= @@ -39,7 +36,6 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= 
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= @@ -129,8 +125,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -179,9 +175,6 @@ github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -197,10 +190,12 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod 
h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0/go.mod h1:PFmBsWbldL1kiWZk9+0LBZz2brhByaGsvp6pRICMlPE= @@ -211,7 +206,7 @@ go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1 go.opentelemetry.io/otel/metric v0.28.0/go.mod h1:TrzsfQAmQaB1PDcdhBauLMk7nyyg9hm+GoQq/ekE9Iw= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.0/go.mod h1:qs7BrU5cZ8dXQHBGxHMOxwME/27YH2qEp4/+tZLLwJE= go.opentelemetry.io/otel/trace v1.6.1/go.mod h1:RkFRM1m0puWIq10oxImnGEduNBzxiN7TXluRBtE+5j0= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= @@ -250,8 +245,8 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -275,13 +270,11 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text 
v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -307,7 +300,6 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= @@ -317,7 +309,6 @@ google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8 google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -331,7 +322,6 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= diff --git a/receiver/splunkhecreceiver/receiver.go b/receiver/splunkhecreceiver/receiver.go index 9719bfd3b545..92d63d82a534 100644 --- a/receiver/splunkhecreceiver/receiver.go +++ b/receiver/splunkhecreceiver/receiver.go @@ -31,8 +31,9 @@ import ( jsoniter "github.com/json-iterator/go" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/obsreport" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk" @@ -250,7 +251,7 @@ func (r *splunkReceiver) handleRawReq(resp http.ResponseWriter, req *http.Reques sc := bufio.NewScanner(bodyReader) - ld := pdata.NewLogs() + ld := plog.NewLogs() rl := ld.ResourceLogs().AppendEmpty() resourceCustomizer := 
r.createResourceCustomizer(req) if resourceCustomizer != nil { @@ -374,12 +375,12 @@ func (r *splunkReceiver) consumeLogs(ctx context.Context, events []*splunk.Event } } -func (r *splunkReceiver) createResourceCustomizer(req *http.Request) func(resource pdata.Resource) { +func (r *splunkReceiver) createResourceCustomizer(req *http.Request) func(resource pcommon.Resource) { if r.config.AccessTokenPassthrough { accessToken := req.Header.Get("Authorization") if strings.HasPrefix(accessToken, splunk.HECTokenHeader+" ") { accessTokenValue := accessToken[len(splunk.HECTokenHeader)+1:] - return func(resource pdata.Resource) { + return func(resource pcommon.Resource) { resource.Attributes().InsertString(splunk.HecTokenLabel, accessTokenValue) } } diff --git a/receiver/splunkhecreceiver/receiver_test.go b/receiver/splunkhecreceiver/receiver_test.go index 502dfbeb2ec7..aec29b40ec82 100644 --- a/receiver/splunkhecreceiver/receiver_test.go +++ b/receiver/splunkhecreceiver/receiver_test.go @@ -37,7 +37,8 @@ import ( "go.opentelemetry.io/collector/config/configtls" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumertest" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/splunkhecexporter" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/testutil" @@ -393,7 +394,7 @@ func Test_splunkhecReceiver_TLS(t *testing.T) { <-time.After(500 * time.Millisecond) t.Log("Event Reception Started") - logs := pdata.NewLogs() + logs := plog.NewLogs() rl := logs.ResourceLogs().AppendEmpty() sl := rl.ScopeLogs().AppendEmpty() lr := sl.LogRecords().AppendEmpty() @@ -401,7 +402,7 @@ func Test_splunkhecReceiver_TLS(t *testing.T) { now := time.Now() msecInt64 := now.UnixNano() / 1e6 sec := float64(msecInt64) / 1e3 - lr.SetTimestamp(pdata.Timestamp(int64(sec * 1e9))) + lr.SetTimestamp(pcommon.Timestamp(int64(sec * 1e9))) lr.Body().SetStringVal("foo") lr.Attributes().InsertString("com.splunk.sourcetype", "custom:sourcetype") diff --git a/receiver/splunkhecreceiver/splunk_to_logdata.go b/receiver/splunkhecreceiver/splunk_to_logdata.go index b270f1d926f7..a4ebca0c9932 100644 --- a/receiver/splunkhecreceiver/splunk_to_logdata.go +++ b/receiver/splunkhecreceiver/splunk_to_logdata.go @@ -18,7 +18,8 @@ import ( "errors" "sort" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk" @@ -29,8 +30,8 @@ const ( ) // splunkHecToLogData transforms splunk events into logs -func splunkHecToLogData(logger *zap.Logger, events []*splunk.Event, resourceCustomizer func(pdata.Resource), config *Config) (pdata.Logs, error) { - ld := pdata.NewLogs() +func splunkHecToLogData(logger *zap.Logger, events []*splunk.Event, resourceCustomizer func(pcommon.Resource), config *Config) (plog.Logs, error) { + ld := plog.NewLogs() rl := ld.ResourceLogs().AppendEmpty() sl := rl.ScopeLogs().AppendEmpty() for _, event := range events { @@ -46,7 +47,7 @@ func splunkHecToLogData(logger *zap.Logger, events []*splunk.Event, resourceCust // Splunk timestamps are in seconds so convert to nanos by multiplying // by 1 billion. 
if event.Time != nil { - logRecord.SetTimestamp(pdata.Timestamp(*event.Time * 1e9)) + logRecord.SetTimestamp(pcommon.Timestamp(*event.Time * 1e9)) } if event.Host != "" { @@ -82,37 +83,37 @@ func splunkHecToLogData(logger *zap.Logger, events []*splunk.Event, resourceCust return ld, nil } -func convertInterfaceToAttributeValue(logger *zap.Logger, originalValue interface{}) (pdata.Value, error) { +func convertInterfaceToAttributeValue(logger *zap.Logger, originalValue interface{}) (pcommon.Value, error) { if originalValue == nil { - return pdata.NewValueEmpty(), nil + return pcommon.NewValueEmpty(), nil } else if value, ok := originalValue.(string); ok { - return pdata.NewValueString(value), nil + return pcommon.NewValueString(value), nil } else if value, ok := originalValue.(int64); ok { - return pdata.NewValueInt(value), nil + return pcommon.NewValueInt(value), nil } else if value, ok := originalValue.(float64); ok { - return pdata.NewValueDouble(value), nil + return pcommon.NewValueDouble(value), nil } else if value, ok := originalValue.(bool); ok { - return pdata.NewValueBool(value), nil + return pcommon.NewValueBool(value), nil } else if value, ok := originalValue.(map[string]interface{}); ok { mapValue, err := convertToAttributeMap(logger, value) if err != nil { - return pdata.NewValueEmpty(), err + return pcommon.NewValueEmpty(), err } return mapValue, nil } else if value, ok := originalValue.([]interface{}); ok { arrValue, err := convertToSliceVal(logger, value) if err != nil { - return pdata.NewValueEmpty(), err + return pcommon.NewValueEmpty(), err } return arrValue, nil } else { logger.Debug("Unsupported value conversion", zap.Any("value", originalValue)) - return pdata.NewValueEmpty(), errors.New(cannotConvertValue) + return pcommon.NewValueEmpty(), errors.New(cannotConvertValue) } } -func convertToSliceVal(logger *zap.Logger, value []interface{}) (pdata.Value, error) { - attrVal := pdata.NewValueSlice() +func convertToSliceVal(logger *zap.Logger, value []interface{}) (pcommon.Value, error) { + attrVal := pcommon.NewValueSlice() arr := attrVal.SliceVal() for _, elt := range value { translatedElt, err := convertInterfaceToAttributeValue(logger, elt) @@ -125,8 +126,8 @@ func convertToSliceVal(logger *zap.Logger, value []interface{}) (pdata.Value, er return attrVal, nil } -func convertToAttributeMap(logger *zap.Logger, value map[string]interface{}) (pdata.Value, error) { - attrVal := pdata.NewValueMap() +func convertToAttributeMap(logger *zap.Logger, value map[string]interface{}) (pcommon.Value, error) { + attrVal := pcommon.NewValueMap() attrMap := attrVal.MapVal() keys := make([]string, 0, len(value)) for k := range value { diff --git a/receiver/splunkhecreceiver/splunk_to_logdata_test.go b/receiver/splunkhecreceiver/splunk_to_logdata_test.go index e9e6c5676b99..21df58316731 100644 --- a/receiver/splunkhecreceiver/splunk_to_logdata_test.go +++ b/receiver/splunkhecreceiver/splunk_to_logdata_test.go @@ -18,8 +18,9 @@ import ( "testing" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk" @@ -42,7 +43,7 @@ func Test_SplunkHecToLogData(t *testing.T) { tests := []struct { name string event splunk.Event - output pdata.ResourceLogsSlice + output plog.ResourceLogsSlice hecConfig *Config wantErr 
error }{ @@ -60,7 +61,7 @@ func Test_SplunkHecToLogData(t *testing.T) { }, }, hecConfig: defaultTestingHecConfig, - output: func() pdata.ResourceLogsSlice { + output: func() plog.ResourceLogsSlice { return createLogsSlice(nanoseconds) }(), wantErr: nil, @@ -79,7 +80,7 @@ func Test_SplunkHecToLogData(t *testing.T) { }, }, hecConfig: defaultTestingHecConfig, - output: func() pdata.ResourceLogsSlice { + output: func() plog.ResourceLogsSlice { logsSlice := createLogsSlice(nanoseconds) logsSlice.At(0).ScopeLogs().At(0).LogRecords().At(0).Body().SetDoubleVal(12.3) return logsSlice @@ -100,9 +101,9 @@ func Test_SplunkHecToLogData(t *testing.T) { }, }, hecConfig: defaultTestingHecConfig, - output: func() pdata.ResourceLogsSlice { + output: func() plog.ResourceLogsSlice { logsSlice := createLogsSlice(nanoseconds) - arrVal := pdata.NewValueSlice() + arrVal := pcommon.NewValueSlice() arr := arrVal.SliceVal() arr.AppendEmpty().SetStringVal("foo") arr.AppendEmpty().SetStringVal("bar") @@ -125,16 +126,16 @@ func Test_SplunkHecToLogData(t *testing.T) { }, }, hecConfig: defaultTestingHecConfig, - output: func() pdata.ResourceLogsSlice { + output: func() plog.ResourceLogsSlice { logsSlice := createLogsSlice(nanoseconds) - foosArr := pdata.NewValueSlice() + foosArr := pcommon.NewValueSlice() foos := foosArr.SliceVal() foos.EnsureCapacity(3) foos.AppendEmpty().SetStringVal("foo") foos.AppendEmpty().SetStringVal("bar") foos.AppendEmpty().SetStringVal("foobar") - attVal := pdata.NewValueMap() + attVal := pcommon.NewValueMap() attMap := attVal.MapVal() attMap.InsertBool("bool", false) attMap.Insert("foos", foosArr) @@ -158,7 +159,7 @@ func Test_SplunkHecToLogData(t *testing.T) { }, }, hecConfig: defaultTestingHecConfig, - output: func() pdata.ResourceLogsSlice { + output: func() plog.ResourceLogsSlice { return createLogsSlice(0) }(), wantErr: nil, @@ -184,13 +185,13 @@ func Test_SplunkHecToLogData(t *testing.T) { Host: "myhost", }, }, - output: func() pdata.ResourceLogsSlice { - lrs := pdata.NewResourceLogsSlice() + output: func() plog.ResourceLogsSlice { + lrs := plog.NewResourceLogsSlice() lr := lrs.AppendEmpty() sl := lr.ScopeLogs().AppendEmpty() logRecord := sl.LogRecords().AppendEmpty() logRecord.Body().SetStringVal("value") - logRecord.SetTimestamp(pdata.Timestamp(0)) + logRecord.SetTimestamp(pcommon.Timestamp(0)) logRecord.Attributes().InsertString("myhost", "localhost") logRecord.Attributes().InsertString("mysource", "mysource") logRecord.Attributes().InsertString("mysourcetype", "mysourcetype") @@ -203,7 +204,7 @@ func Test_SplunkHecToLogData(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - result, err := splunkHecToLogData(zap.NewNop(), []*splunk.Event{&tt.event}, func(resource pdata.Resource) {}, tt.hecConfig) + result, err := splunkHecToLogData(zap.NewNop(), []*splunk.Event{&tt.event}, func(resource pcommon.Resource) {}, tt.hecConfig) assert.Equal(t, tt.wantErr, err) assert.Equal(t, tt.output.Len(), result.ResourceLogs().Len()) assert.Equal(t, tt.output.At(0), result.ResourceLogs().At(0)) @@ -211,13 +212,13 @@ func Test_SplunkHecToLogData(t *testing.T) { } } -func createLogsSlice(nanoseconds int) pdata.ResourceLogsSlice { - lrs := pdata.NewResourceLogsSlice() +func createLogsSlice(nanoseconds int) plog.ResourceLogsSlice { + lrs := plog.NewResourceLogsSlice() lr := lrs.AppendEmpty() sl := lr.ScopeLogs().AppendEmpty() logRecord := sl.LogRecords().AppendEmpty() logRecord.Body().SetStringVal("value") - logRecord.SetTimestamp(pdata.Timestamp(nanoseconds)) + 
logRecord.SetTimestamp(pcommon.Timestamp(nanoseconds)) logRecord.Attributes().InsertString("host.name", "localhost") logRecord.Attributes().InsertString("com.splunk.source", "mysource") logRecord.Attributes().InsertString("com.splunk.sourcetype", "mysourcetype") @@ -230,31 +231,31 @@ func createLogsSlice(nanoseconds int) pdata.ResourceLogsSlice { func Test_ConvertAttributeValueEmpty(t *testing.T) { value, err := convertInterfaceToAttributeValue(zap.NewNop(), nil) assert.NoError(t, err) - assert.Equal(t, pdata.NewValueEmpty(), value) + assert.Equal(t, pcommon.NewValueEmpty(), value) } func Test_ConvertAttributeValueString(t *testing.T) { value, err := convertInterfaceToAttributeValue(zap.NewNop(), "foo") assert.NoError(t, err) - assert.Equal(t, pdata.NewValueString("foo"), value) + assert.Equal(t, pcommon.NewValueString("foo"), value) } func Test_ConvertAttributeValueBool(t *testing.T) { value, err := convertInterfaceToAttributeValue(zap.NewNop(), false) assert.NoError(t, err) - assert.Equal(t, pdata.NewValueBool(false), value) + assert.Equal(t, pcommon.NewValueBool(false), value) } func Test_ConvertAttributeValueFloat(t *testing.T) { value, err := convertInterfaceToAttributeValue(zap.NewNop(), 12.3) assert.NoError(t, err) - assert.Equal(t, pdata.NewValueDouble(12.3), value) + assert.Equal(t, pcommon.NewValueDouble(12.3), value) } func Test_ConvertAttributeValueMap(t *testing.T) { value, err := convertInterfaceToAttributeValue(zap.NewNop(), map[string]interface{}{"foo": "bar"}) assert.NoError(t, err) - atts := pdata.NewValueMap() + atts := pcommon.NewValueMap() attMap := atts.MapVal() attMap.InsertString("foo", "bar") assert.Equal(t, atts, value) @@ -263,7 +264,7 @@ func Test_ConvertAttributeValueMap(t *testing.T) { func Test_ConvertAttributeValueArray(t *testing.T) { value, err := convertInterfaceToAttributeValue(zap.NewNop(), []interface{}{"foo"}) assert.NoError(t, err) - arrValue := pdata.NewValueSlice() + arrValue := pcommon.NewValueSlice() arr := arrValue.SliceVal() arr.AppendEmpty().SetStringVal("foo") assert.Equal(t, arrValue, value) @@ -272,17 +273,17 @@ func Test_ConvertAttributeValueArray(t *testing.T) { func Test_ConvertAttributeValueInvalid(t *testing.T) { value, err := convertInterfaceToAttributeValue(zap.NewNop(), splunk.Event{}) assert.Error(t, err) - assert.Equal(t, pdata.NewValueEmpty(), value) + assert.Equal(t, pcommon.NewValueEmpty(), value) } func Test_ConvertAttributeValueInvalidInMap(t *testing.T) { value, err := convertInterfaceToAttributeValue(zap.NewNop(), map[string]interface{}{"foo": splunk.Event{}}) assert.Error(t, err) - assert.Equal(t, pdata.NewValueEmpty(), value) + assert.Equal(t, pcommon.NewValueEmpty(), value) } func Test_ConvertAttributeValueInvalidInArray(t *testing.T) { value, err := convertInterfaceToAttributeValue(zap.NewNop(), []interface{}{splunk.Event{}}) assert.Error(t, err) - assert.Equal(t, pdata.NewValueEmpty(), value) + assert.Equal(t, pcommon.NewValueEmpty(), value) } diff --git a/receiver/splunkhecreceiver/splunkhec_to_metricdata.go b/receiver/splunkhecreceiver/splunkhec_to_metricdata.go index 35934c492a63..4700c3531c0d 100644 --- a/receiver/splunkhecreceiver/splunkhec_to_metricdata.go +++ b/receiver/splunkhecreceiver/splunkhec_to_metricdata.go @@ -19,21 +19,22 @@ import ( "strconv" "strings" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk" ) // 
splunkHecToMetricsData converts Splunk HEC metric points to -// pdata.Metrics. Returning the converted data and the number of +// pmetric.Metrics. Returning the converted data and the number of // dropped time series. -func splunkHecToMetricsData(logger *zap.Logger, events []*splunk.Event, resourceCustomizer func(pdata.Resource), config *Config) (pdata.Metrics, int) { +func splunkHecToMetricsData(logger *zap.Logger, events []*splunk.Event, resourceCustomizer func(pcommon.Resource), config *Config) (pmetric.Metrics, int) { numDroppedTimeSeries := 0 - md := pdata.NewMetrics() + md := pmetric.NewMetrics() for _, event := range events { - resourceMetrics := pdata.NewResourceMetrics() + resourceMetrics := pmetric.NewResourceMetrics() if resourceCustomizer != nil { resourceCustomizer(resourceMetrics.Resource()) } @@ -58,7 +59,7 @@ func splunkHecToMetricsData(logger *zap.Logger, events []*splunk.Event, resource metrics := resourceMetrics.ScopeMetrics().AppendEmpty().Metrics() for metricName, metricValue := range values { pointTimestamp := convertTimestamp(event.Time) - metric := pdata.NewMetric() + metric := pmetric.NewMetric() metric.SetName(metricName) switch v := metricValue.(type) { @@ -91,7 +92,7 @@ func splunkHecToMetricsData(logger *zap.Logger, events []*splunk.Event, resource return md, numDroppedTimeSeries } -func convertString(logger *zap.Logger, numDroppedTimeSeries *int, metrics pdata.MetricSlice, metricName string, pointTimestamp pdata.Timestamp, s string, attributes pdata.Map) { +func convertString(logger *zap.Logger, numDroppedTimeSeries *int, metrics pmetric.MetricSlice, metricName string, pointTimestamp pcommon.Timestamp, s string, attributes pcommon.Map) { // best effort, cast to string and turn into a number dbl, err := strconv.ParseFloat(s, 64) if err != nil { @@ -103,37 +104,37 @@ func convertString(logger *zap.Logger, numDroppedTimeSeries *int, metrics pdata. } } -func addIntGauge(metrics pdata.MetricSlice, metricName string, value int64, ts pdata.Timestamp, attributes pdata.Map) { +func addIntGauge(metrics pmetric.MetricSlice, metricName string, value int64, ts pcommon.Timestamp, attributes pcommon.Map) { metric := metrics.AppendEmpty() metric.SetName(metricName) - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) intPt := metric.Gauge().DataPoints().AppendEmpty() intPt.SetTimestamp(ts) intPt.SetIntVal(value) attributes.CopyTo(intPt.Attributes()) } -func addDoubleGauge(metrics pdata.MetricSlice, metricName string, value float64, ts pdata.Timestamp, attributes pdata.Map) { +func addDoubleGauge(metrics pmetric.MetricSlice, metricName string, value float64, ts pcommon.Timestamp, attributes pcommon.Map) { metric := metrics.AppendEmpty() metric.SetName(metricName) - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) doublePt := metric.Gauge().DataPoints().AppendEmpty() doublePt.SetTimestamp(ts) doublePt.SetDoubleVal(value) attributes.CopyTo(doublePt.Attributes()) } -func convertTimestamp(sec *float64) pdata.Timestamp { +func convertTimestamp(sec *float64) pcommon.Timestamp { if sec == nil { return 0 } - return pdata.Timestamp(*sec * 1e9) + return pcommon.Timestamp(*sec * 1e9) } // Extract dimensions from the Splunk event fields to populate metric data point attributes. 
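As a companion to the addIntGauge/addDoubleGauge changes above, here is a minimal sketch (hypothetical metric name, value, and attribute) of how a gauge data point is assembled with pmetric and pcommon after the migration:

package main

import (
	"fmt"
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	md := pmetric.NewMetrics()
	rm := md.ResourceMetrics().AppendEmpty()
	rm.Resource().Attributes().InsertString("host.name", "localhost")

	// One gauge metric with a single double data point, the shape the receiver
	// emits for a HEC metric field.
	metric := rm.ScopeMetrics().AppendEmpty().Metrics().AppendEmpty()
	metric.SetName("example.metric") // hypothetical name
	metric.SetDataType(pmetric.MetricDataTypeGauge)

	dp := metric.Gauge().DataPoints().AppendEmpty()
	dp.SetDoubleVal(13.13)
	dp.SetTimestamp(pcommon.NewTimestampFromTime(time.Now()))
	dp.Attributes().InsertString("k0", "v0")

	fmt.Println(md.MetricCount()) // 1
}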
-func buildAttributes(dimensions map[string]interface{}) pdata.Map { - attributes := pdata.NewMap() +func buildAttributes(dimensions map[string]interface{}) pcommon.Map { + attributes := pcommon.NewMap() attributes.EnsureCapacity(len(dimensions)) for key, val := range dimensions { diff --git a/receiver/splunkhecreceiver/splunkhec_to_metricdata_test.go b/receiver/splunkhecreceiver/splunkhec_to_metricdata_test.go index 959417f0b3e7..2efdac0c483c 100644 --- a/receiver/splunkhecreceiver/splunkhec_to_metricdata_test.go +++ b/receiver/splunkhecreceiver/splunkhec_to_metricdata_test.go @@ -20,7 +20,8 @@ import ( "time" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk" @@ -53,7 +54,7 @@ func Test_splunkV2ToMetricsData(t *testing.T) { tests := []struct { name string splunkDataPoint *splunk.Event - wantMetricsData pdata.Metrics + wantMetricsData pmetric.Metrics wantDroppedTimeseries int hecConfig *Config }{ @@ -71,26 +72,26 @@ func Test_splunkV2ToMetricsData(t *testing.T) { pt.Fields["metric_name:yetanotherandanother"] = int64Ptr(15) return pt }(), - wantMetricsData: func() pdata.Metrics { + wantMetricsData: func() pmetric.Metrics { metrics := buildDefaultMetricsData(nanos) mts := metrics.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics() metricPt := mts.AppendEmpty() - metricPt.SetDataType(pdata.MetricDataTypeGauge) + metricPt.SetDataType(pmetric.MetricDataTypeGauge) metricPt.SetName("yetanother") intPt := metricPt.Gauge().DataPoints().AppendEmpty() intPt.SetIntVal(14) - intPt.SetTimestamp(pdata.Timestamp(nanos)) + intPt.SetTimestamp(pcommon.Timestamp(nanos)) intPt.Attributes().InsertString("k0", "v0") intPt.Attributes().InsertString("k1", "v1") intPt.Attributes().InsertString("k2", "v2") metricPt2 := mts.AppendEmpty() - metricPt2.SetDataType(pdata.MetricDataTypeGauge) + metricPt2.SetDataType(pmetric.MetricDataTypeGauge) metricPt2.SetName("yetanotherandanother") intPt2 := metricPt2.Gauge().DataPoints().AppendEmpty() intPt2.SetIntVal(15) - intPt2.SetTimestamp(pdata.Timestamp(nanos)) + intPt2.SetTimestamp(pcommon.Timestamp(nanos)) intPt2.Attributes().InsertString("k0", "v0") intPt2.Attributes().InsertString("k1", "v1") intPt2.Attributes().InsertString("k2", "v2") @@ -106,15 +107,15 @@ func Test_splunkV2ToMetricsData(t *testing.T) { pt.Fields["metric_name:single"] = float64Ptr(13.13) return pt }(), - wantMetricsData: func() pdata.Metrics { + wantMetricsData: func() pmetric.Metrics { md := buildDefaultMetricsData(nanos) mts := md.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics() metricPt := mts.At(0) - metricPt.SetDataType(pdata.MetricDataTypeGauge) + metricPt.SetDataType(pmetric.MetricDataTypeGauge) metricPt.SetName("single") doublePt := metricPt.Gauge().DataPoints().AppendEmpty() doublePt.SetDoubleVal(13.13) - doublePt.SetTimestamp(pdata.Timestamp(nanos)) + doublePt.SetTimestamp(pcommon.Timestamp(nanos)) doublePt.Attributes().InsertString("k0", "v0") doublePt.Attributes().InsertString("k1", "v1") doublePt.Attributes().InsertString("k2", "v2") @@ -148,8 +149,8 @@ func Test_splunkV2ToMetricsData(t *testing.T) { pt.Fields["metric_name:single"] = int64(13) return pt }(), - wantMetricsData: func() pdata.Metrics { - metrics := pdata.NewMetrics() + wantMetricsData: func() pmetric.Metrics { + metrics := pmetric.NewMetrics() resourceMetrics := 
metrics.ResourceMetrics().AppendEmpty() attrs := resourceMetrics.Resource().Attributes() attrs.InsertString("myhost", "localhost") @@ -158,14 +159,14 @@ func Test_splunkV2ToMetricsData(t *testing.T) { attrs.InsertString("myindex", "index") metricPt := resourceMetrics.ScopeMetrics().AppendEmpty().Metrics().AppendEmpty() - metricPt.SetDataType(pdata.MetricDataTypeGauge) + metricPt.SetDataType(pmetric.MetricDataTypeGauge) metricPt.SetName("single") intPt := metricPt.Gauge().DataPoints().AppendEmpty() intPt.SetIntVal(13) intPt.Attributes().InsertString("k0", "v0") intPt.Attributes().InsertString("k1", "v1") intPt.Attributes().InsertString("k2", "v2") - intPt.SetTimestamp(pdata.Timestamp(nanos)) + intPt.SetTimestamp(pcommon.Timestamp(nanos)) return metrics }(), hecConfig: &Config{HecToOtelAttrs: splunk.HecToOtelAttrs{ @@ -183,18 +184,18 @@ func Test_splunkV2ToMetricsData(t *testing.T) { pt.Fields["metric_name:single"] = float64Ptr(13.13) return pt }(), - wantMetricsData: func() pdata.Metrics { + wantMetricsData: func() pmetric.Metrics { md := buildDefaultMetricsData(nanos) mts := md.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics() metricPt := mts.At(0) - metricPt.SetDataType(pdata.MetricDataTypeGauge) + metricPt.SetDataType(pmetric.MetricDataTypeGauge) metricPt.SetName("single") doublePt := metricPt.Gauge().DataPoints().AppendEmpty() doublePt.SetDoubleVal(13.13) doublePt.Attributes().InsertString("k0", "v0") doublePt.Attributes().InsertString("k1", "v1") doublePt.Attributes().InsertString("k2", "v2") - doublePt.SetTimestamp(pdata.Timestamp(nanos)) + doublePt.SetTimestamp(pcommon.Timestamp(nanos)) return md }(), hecConfig: defaultTestingHecConfig, @@ -206,18 +207,18 @@ func Test_splunkV2ToMetricsData(t *testing.T) { pt.Fields["metric_name:single"] = strPtr("13.13") return pt }(), - wantMetricsData: func() pdata.Metrics { + wantMetricsData: func() pmetric.Metrics { md := buildDefaultMetricsData(nanos) mts := md.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics() metricPt := mts.At(0) - metricPt.SetDataType(pdata.MetricDataTypeGauge) + metricPt.SetDataType(pmetric.MetricDataTypeGauge) metricPt.SetName("single") doublePt := metricPt.Gauge().DataPoints().AppendEmpty() doublePt.SetDoubleVal(13.13) doublePt.Attributes().InsertString("k0", "v0") doublePt.Attributes().InsertString("k1", "v1") doublePt.Attributes().InsertString("k2", "v2") - doublePt.SetTimestamp(pdata.Timestamp(nanos)) + doublePt.SetTimestamp(pcommon.Timestamp(nanos)) return md }(), hecConfig: defaultTestingHecConfig, @@ -229,18 +230,18 @@ func Test_splunkV2ToMetricsData(t *testing.T) { pt.Fields["metric_name:single"] = "13.13" return pt }(), - wantMetricsData: func() pdata.Metrics { + wantMetricsData: func() pmetric.Metrics { md := buildDefaultMetricsData(nanos) mts := md.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics() metricPt := mts.At(0) - metricPt.SetDataType(pdata.MetricDataTypeGauge) + metricPt.SetDataType(pmetric.MetricDataTypeGauge) metricPt.SetName("single") doublePt := metricPt.Gauge().DataPoints().AppendEmpty() doublePt.SetDoubleVal(13.13) doublePt.Attributes().InsertString("k0", "v0") doublePt.Attributes().InsertString("k1", "v1") doublePt.Attributes().InsertString("k2", "v2") - doublePt.SetTimestamp(pdata.Timestamp(nanos)) + doublePt.SetTimestamp(pcommon.Timestamp(nanos)) return md }(), hecConfig: defaultTestingHecConfig, @@ -252,7 +253,7 @@ func Test_splunkV2ToMetricsData(t *testing.T) { pt.Time = new(float64) return pt }(), - wantMetricsData: func() pdata.Metrics { + wantMetricsData: func() 
pmetric.Metrics { return buildDefaultMetricsData(0) }(), hecConfig: defaultTestingHecConfig, @@ -264,7 +265,7 @@ func Test_splunkV2ToMetricsData(t *testing.T) { pt.Fields["k0"] = "" return pt }(), - wantMetricsData: func() pdata.Metrics { + wantMetricsData: func() pmetric.Metrics { md := buildDefaultMetricsData(nanos) md.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Gauge().DataPoints().At(0).Attributes().UpdateString("k0", "") return md @@ -278,8 +279,8 @@ func Test_splunkV2ToMetricsData(t *testing.T) { pt.Fields["metric_name:single"] = "foo" return pt }(), - wantMetricsData: func() pdata.Metrics { - return pdata.NewMetrics() + wantMetricsData: func() pmetric.Metrics { + return pmetric.NewMetrics() }(), hecConfig: defaultTestingHecConfig, wantDroppedTimeseries: 1, @@ -292,8 +293,8 @@ func Test_splunkV2ToMetricsData(t *testing.T) { pt.Fields["metric_name:single"] = &value return pt }(), - wantMetricsData: func() pdata.Metrics { - return pdata.NewMetrics() + wantMetricsData: func() pmetric.Metrics { + return pmetric.NewMetrics() }(), wantDroppedTimeseries: 1, hecConfig: defaultTestingHecConfig, @@ -314,15 +315,15 @@ func Test_splunkV2ToMetricsData(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - md, numDroppedTimeseries := splunkHecToMetricsData(zap.NewNop(), []*splunk.Event{tt.splunkDataPoint}, func(resource pdata.Resource) {}, tt.hecConfig) + md, numDroppedTimeseries := splunkHecToMetricsData(zap.NewNop(), []*splunk.Event{tt.splunkDataPoint}, func(resource pcommon.Resource) {}, tt.hecConfig) assert.Equal(t, tt.wantDroppedTimeseries, numDroppedTimeseries) assert.EqualValues(t, tt.wantMetricsData, sortMetricsAndLabels(md)) }) } } -func buildDefaultMetricsData(time int64) pdata.Metrics { - metrics := pdata.NewMetrics() +func buildDefaultMetricsData(time int64) pmetric.Metrics { + metrics := pmetric.NewMetrics() resourceMetrics := metrics.ResourceMetrics().AppendEmpty() attrs := resourceMetrics.Resource().Attributes() attrs.InsertString("host.name", "localhost") @@ -331,14 +332,14 @@ func buildDefaultMetricsData(time int64) pdata.Metrics { attrs.InsertString("com.splunk.index", "index") metricPt := resourceMetrics.ScopeMetrics().AppendEmpty().Metrics().AppendEmpty() - metricPt.SetDataType(pdata.MetricDataTypeGauge) + metricPt.SetDataType(pmetric.MetricDataTypeGauge) metricPt.SetName("single") intPt := metricPt.Gauge().DataPoints().AppendEmpty() intPt.SetIntVal(13) intPt.Attributes().InsertString("k0", "v0") intPt.Attributes().InsertString("k1", "v1") intPt.Attributes().InsertString("k2", "v2") - intPt.SetTimestamp(pdata.Timestamp(time)) + intPt.SetTimestamp(pcommon.Timestamp(time)) return metrics } @@ -357,7 +358,7 @@ func float64Ptr(f float64) *float64 { return &l } -func sortMetricsAndLabels(md pdata.Metrics) pdata.Metrics { +func sortMetricsAndLabels(md pmetric.Metrics) pmetric.Metrics { for i := 0; i < md.ResourceMetrics().Len(); i++ { rm := md.ResourceMetrics().At(i) for j := 0; j < rm.ScopeMetrics().Len(); j++ { @@ -368,14 +369,14 @@ func sortMetricsAndLabels(md pdata.Metrics) pdata.Metrics { return md } -func internalSortMetricsAndLabels(metrics pdata.MetricSlice) { - dest := pdata.NewMetricSlice() - metricsMap := make(map[string]pdata.Metric) +func internalSortMetricsAndLabels(metrics pmetric.MetricSlice) { + dest := pmetric.NewMetricSlice() + metricsMap := make(map[string]pmetric.Metric) for k := 0; k < metrics.Len(); k++ { m := metrics.At(k) metricsMap[m.Name()] = m switch m.DataType() { - case pdata.MetricDataTypeGauge: + case 
pmetric.MetricDataTypeGauge: dps := m.Gauge().DataPoints() for l := 0; l < dps.Len(); l++ { dps.At(l).Attributes().Sort() diff --git a/receiver/statsdreceiver/go.mod b/receiver/statsdreceiver/go.mod index 22a607315a1e..8c132b5bb3fb 100644 --- a/receiver/statsdreceiver/go.mod +++ b/receiver/statsdreceiver/go.mod @@ -6,8 +6,8 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.48.0 github.com/stretchr/testify v1.7.1 go.opencensus.io v0.23.0 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.opentelemetry.io/otel v1.6.3 go.uber.org/multierr v1.8.0 go.uber.org/zap v1.21.0 @@ -21,22 +21,23 @@ require ( github.com/go-logr/stdr v1.2.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/pretty v0.3.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/pelletier/go-toml v1.9.4 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect - go.opentelemetry.io/otel/sdk v1.6.1 // indirect + go.opentelemetry.io/otel/sdk v1.6.3 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect + golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/common => ../../internal/common + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/receiver/statsdreceiver/go.sum b/receiver/statsdreceiver/go.sum index 78e75cbc930e..22e7a893eb22 100644 --- a/receiver/statsdreceiver/go.sum +++ b/receiver/statsdreceiver/go.sum @@ -17,7 +17,7 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= @@ -74,7 +74,6 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ 
github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -104,8 +103,8 @@ github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqx github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= @@ -150,8 +149,6 @@ github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTE github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -166,20 +163,18 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= 
+go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= -go.opentelemetry.io/otel v1.6.1/go.mod h1:blzUabWHkX6LJewxvadmzafgh/wnvBSDBdOuwkAtrWQ= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= -go.opentelemetry.io/otel/sdk v1.6.1/go.mod h1:IVYrddmFZ+eJqu2k38qD3WezFR2pymCzm8tdxyh3R4E= -go.opentelemetry.io/otel/trace v1.6.1/go.mod h1:RkFRM1m0puWIq10oxImnGEduNBzxiN7TXluRBtE+5j0= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= +go.opentelemetry.io/otel/sdk v1.6.3/go.mod h1:A4iWF7HTXa+GWL/AaqESz28VuSBIcZ+0CV+IzJ5NMiQ= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -221,7 +216,7 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -247,8 +242,8 @@ golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= 
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= diff --git a/receiver/statsdreceiver/protocol/metric_translator.go b/receiver/statsdreceiver/protocol/metric_translator.go index a599dd3b26bc..e6eb7bc125e9 100644 --- a/receiver/statsdreceiver/protocol/metric_translator.go +++ b/receiver/statsdreceiver/protocol/metric_translator.go @@ -18,7 +18,8 @@ import ( "sort" "time" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "gonum.org/v1/gonum/stat" ) @@ -26,22 +27,22 @@ var ( statsDDefaultPercentiles = []float64{0, 10, 50, 90, 95, 100} ) -func buildCounterMetric(parsedMetric statsDMetric, isMonotonicCounter bool, timeNow, lastIntervalTime time.Time) pdata.ScopeMetrics { - ilm := pdata.NewScopeMetrics() +func buildCounterMetric(parsedMetric statsDMetric, isMonotonicCounter bool, timeNow, lastIntervalTime time.Time) pmetric.ScopeMetrics { + ilm := pmetric.NewScopeMetrics() nm := ilm.Metrics().AppendEmpty() nm.SetName(parsedMetric.description.name) if parsedMetric.unit != "" { nm.SetUnit(parsedMetric.unit) } - nm.SetDataType(pdata.MetricDataTypeSum) + nm.SetDataType(pmetric.MetricDataTypeSum) - nm.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityDelta) + nm.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityDelta) nm.Sum().SetIsMonotonic(isMonotonicCounter) dp := nm.Sum().DataPoints().AppendEmpty() dp.SetIntVal(parsedMetric.counterValue()) - dp.SetStartTimestamp(pdata.NewTimestampFromTime(lastIntervalTime)) - dp.SetTimestamp(pdata.NewTimestampFromTime(timeNow)) + dp.SetStartTimestamp(pcommon.NewTimestampFromTime(lastIntervalTime)) + dp.SetTimestamp(pcommon.NewTimestampFromTime(timeNow)) for i := parsedMetric.description.attrs.Iter(); i.Next(); { dp.Attributes().InsertString(string(i.Attribute().Key), i.Attribute().Value.AsString()) } @@ -49,17 +50,17 @@ func buildCounterMetric(parsedMetric statsDMetric, isMonotonicCounter bool, time return ilm } -func buildGaugeMetric(parsedMetric statsDMetric, timeNow time.Time) pdata.ScopeMetrics { - ilm := pdata.NewScopeMetrics() +func buildGaugeMetric(parsedMetric statsDMetric, timeNow time.Time) pmetric.ScopeMetrics { + ilm := pmetric.NewScopeMetrics() nm := ilm.Metrics().AppendEmpty() nm.SetName(parsedMetric.description.name) if parsedMetric.unit != "" { nm.SetUnit(parsedMetric.unit) } - nm.SetDataType(pdata.MetricDataTypeGauge) + nm.SetDataType(pmetric.MetricDataTypeGauge) dp := nm.Gauge().DataPoints().AppendEmpty() dp.SetDoubleVal(parsedMetric.gaugeValue()) - dp.SetTimestamp(pdata.NewTimestampFromTime(timeNow)) + dp.SetTimestamp(pcommon.NewTimestampFromTime(timeNow)) for i := parsedMetric.description.attrs.Iter(); i.Next(); { dp.Attributes().InsertString(string(i.Attribute().Key), i.Attribute().Value.AsString()) } @@ -67,10 +68,10 @@ func buildGaugeMetric(parsedMetric statsDMetric, timeNow time.Time) pdata.ScopeM return ilm } -func buildSummaryMetric(desc statsDMetricDescription, summary summaryMetric, startTime, timeNow time.Time, percentiles []float64, ilm pdata.ScopeMetrics) { +func buildSummaryMetric(desc statsDMetricDescription, summary summaryMetric, startTime, timeNow time.Time, percentiles []float64, ilm pmetric.ScopeMetrics) { nm := ilm.Metrics().AppendEmpty() nm.SetName(desc.name) - nm.SetDataType(pdata.MetricDataTypeSummary) + nm.SetDataType(pmetric.MetricDataTypeSummary) dp := 
nm.Summary().DataPoints().AppendEmpty() @@ -86,8 +87,8 @@ func buildSummaryMetric(desc statsDMetricDescription, summary summaryMetric, sta dp.SetCount(uint64(count)) dp.SetSum(sum) - dp.SetStartTimestamp(pdata.NewTimestampFromTime(startTime)) - dp.SetTimestamp(pdata.NewTimestampFromTime(timeNow)) + dp.SetStartTimestamp(pcommon.NewTimestampFromTime(startTime)) + dp.SetTimestamp(pcommon.NewTimestampFromTime(timeNow)) for i := desc.attrs.Iter(); i.Next(); { dp.Attributes().InsertString(string(i.Attribute().Key), i.Attribute().Value.AsString()) } diff --git a/receiver/statsdreceiver/protocol/metric_translator_test.go b/receiver/statsdreceiver/protocol/metric_translator_test.go index 6f1a1ed8587c..fce1297e0f20 100644 --- a/receiver/statsdreceiver/protocol/metric_translator_test.go +++ b/receiver/statsdreceiver/protocol/metric_translator_test.go @@ -19,7 +19,8 @@ import ( "time" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/otel/attribute" ) @@ -38,17 +39,17 @@ func TestBuildCounterMetric(t *testing.T) { } isMonotonicCounter := false metric := buildCounterMetric(parsedMetric, isMonotonicCounter, timeNow, lastUpdateInterval) - expectedMetrics := pdata.NewScopeMetrics() + expectedMetrics := pmetric.NewScopeMetrics() expectedMetric := expectedMetrics.Metrics().AppendEmpty() expectedMetric.SetName("testCounter") expectedMetric.SetUnit("meter") - expectedMetric.SetDataType(pdata.MetricDataTypeSum) - expectedMetric.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityDelta) + expectedMetric.SetDataType(pmetric.MetricDataTypeSum) + expectedMetric.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityDelta) expectedMetric.Sum().SetIsMonotonic(isMonotonicCounter) dp := expectedMetric.Sum().DataPoints().AppendEmpty() dp.SetIntVal(32) - dp.SetStartTimestamp(pdata.NewTimestampFromTime(lastUpdateInterval)) - dp.SetTimestamp(pdata.NewTimestampFromTime(timeNow)) + dp.SetStartTimestamp(pcommon.NewTimestampFromTime(lastUpdateInterval)) + dp.SetTimestamp(pcommon.NewTimestampFromTime(timeNow)) dp.Attributes().InsertString("mykey", "myvalue") assert.Equal(t, metric, expectedMetrics) } @@ -68,14 +69,14 @@ func TestBuildGaugeMetric(t *testing.T) { unit: "meter", } metric := buildGaugeMetric(parsedMetric, timeNow) - expectedMetrics := pdata.NewScopeMetrics() + expectedMetrics := pmetric.NewScopeMetrics() expectedMetric := expectedMetrics.Metrics().AppendEmpty() expectedMetric.SetName("testGauge") expectedMetric.SetUnit("meter") - expectedMetric.SetDataType(pdata.MetricDataTypeGauge) + expectedMetric.SetDataType(pmetric.MetricDataTypeGauge) dp := expectedMetric.Gauge().DataPoints().AppendEmpty() dp.SetDoubleVal(32.3) - dp.SetTimestamp(pdata.NewTimestampFromTime(timeNow)) + dp.SetTimestamp(pcommon.NewTimestampFromTime(timeNow)) dp.Attributes().InsertString("mykey", "myvalue") dp.Attributes().InsertString("mykey2", "myvalue2") assert.Equal(t, metric, expectedMetrics) @@ -100,18 +101,18 @@ func TestBuildSummaryMetricUnsampled(t *testing.T) { attrs: attrs, } - metric := pdata.NewScopeMetrics() + metric := pmetric.NewScopeMetrics() buildSummaryMetric(desc, unsampledMetric, timeNow.Add(-time.Minute), timeNow, statsDDefaultPercentiles, metric) - expectedMetric := pdata.NewScopeMetrics() + expectedMetric := pmetric.NewScopeMetrics() m := expectedMetric.Metrics().AppendEmpty() m.SetName("testSummary") - 
m.SetDataType(pdata.MetricDataTypeSummary) + m.SetDataType(pmetric.MetricDataTypeSummary) dp := m.Summary().DataPoints().AppendEmpty() dp.SetSum(21) dp.SetCount(6) - dp.SetStartTimestamp(pdata.NewTimestampFromTime(timeNow.Add(-time.Minute))) - dp.SetTimestamp(pdata.NewTimestampFromTime(timeNow)) + dp.SetStartTimestamp(pcommon.NewTimestampFromTime(timeNow.Add(-time.Minute))) + dp.SetTimestamp(pcommon.NewTimestampFromTime(timeNow)) for _, kv := range desc.attrs.ToSlice() { dp.Attributes().InsertString(string(kv.Key), kv.Value.AsString()) } @@ -181,20 +182,20 @@ func TestBuildSummaryMetricSampled(t *testing.T) { attrs: attrs, } - metric := pdata.NewScopeMetrics() + metric := pmetric.NewScopeMetrics() buildSummaryMetric(desc, sampledMetric, timeNow.Add(-time.Minute), timeNow, test.percentiles, metric) - expectedMetric := pdata.NewScopeMetrics() + expectedMetric := pmetric.NewScopeMetrics() m := expectedMetric.Metrics().AppendEmpty() m.SetName("testSummary") - m.SetDataType(pdata.MetricDataTypeSummary) + m.SetDataType(pmetric.MetricDataTypeSummary) dp := m.Summary().DataPoints().AppendEmpty() dp.SetSum(test.sum) dp.SetCount(test.count) - dp.SetStartTimestamp(pdata.NewTimestampFromTime(timeNow.Add(-time.Minute))) - dp.SetTimestamp(pdata.NewTimestampFromTime(timeNow)) + dp.SetStartTimestamp(pcommon.NewTimestampFromTime(timeNow.Add(-time.Minute))) + dp.SetTimestamp(pcommon.NewTimestampFromTime(timeNow)) for _, kv := range desc.attrs.ToSlice() { dp.Attributes().InsertString(string(kv.Key), kv.Value.AsString()) } diff --git a/receiver/statsdreceiver/protocol/parser.go b/receiver/statsdreceiver/protocol/parser.go index 2da0197d8f27..d3ca79754dcd 100644 --- a/receiver/statsdreceiver/protocol/parser.go +++ b/receiver/statsdreceiver/protocol/parser.go @@ -15,12 +15,12 @@ package protocol // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/protocol" import ( - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" ) // Parser is something that can map input StatsD strings to OTLP Metric representations. type Parser interface { Initialize(enableMetricType bool, isMonotonicCounter bool, sendTimerHistogram []TimerHistogramMapping) error - GetMetrics() pdata.Metrics + GetMetrics() pmetric.Metrics Aggregate(line string) error } diff --git a/receiver/statsdreceiver/protocol/statsd_parser.go b/receiver/statsdreceiver/protocol/statsd_parser.go index 4dd89099b065..b9b11f73c0f6 100644 --- a/receiver/statsdreceiver/protocol/statsd_parser.go +++ b/receiver/statsdreceiver/protocol/statsd_parser.go @@ -21,7 +21,7 @@ import ( "strings" "time" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/otel/attribute" ) @@ -64,10 +64,10 @@ type TimerHistogramMapping struct { // StatsDParser supports the Parse method for parsing StatsD messages with Tags. 
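For reference, the delta-temporality counter shape that buildCounterMetric above produces, as a minimal standalone sketch under the new pmetric/pcommon API (metric name, value, and timestamps are illustrative only):

package main

import (
	"fmt"
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	ilm := pmetric.NewScopeMetrics()
	nm := ilm.Metrics().AppendEmpty()
	nm.SetName("example.counter") // hypothetical name
	nm.SetDataType(pmetric.MetricDataTypeSum)
	nm.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityDelta)
	nm.Sum().SetIsMonotonic(false)

	// One data point spanning the last flush interval.
	dp := nm.Sum().DataPoints().AppendEmpty()
	dp.SetIntVal(32)
	dp.SetStartTimestamp(pcommon.NewTimestampFromTime(time.Now().Add(-10 * time.Second)))
	dp.SetTimestamp(pcommon.NewTimestampFromTime(time.Now()))
	dp.Attributes().InsertString("mykey", "myvalue")

	fmt.Println(ilm.Metrics().Len()) // 1
}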
type StatsDParser struct { - gauges map[statsDMetricDescription]pdata.ScopeMetrics - counters map[statsDMetricDescription]pdata.ScopeMetrics + gauges map[statsDMetricDescription]pmetric.ScopeMetrics + counters map[statsDMetricDescription]pmetric.ScopeMetrics summaries map[statsDMetricDescription]summaryMetric - timersAndDistributions []pdata.ScopeMetrics + timersAndDistributions []pmetric.ScopeMetrics enableMetricType bool isMonotonicCounter bool observeTimer ObserverType @@ -115,9 +115,9 @@ func (t MetricType) FullName() TypeName { func (p *StatsDParser) Initialize(enableMetricType bool, isMonotonicCounter bool, sendTimerHistogram []TimerHistogramMapping) error { p.lastIntervalTime = timeNowFunc() - p.gauges = make(map[statsDMetricDescription]pdata.ScopeMetrics) - p.counters = make(map[statsDMetricDescription]pdata.ScopeMetrics) - p.timersAndDistributions = make([]pdata.ScopeMetrics, 0) + p.gauges = make(map[statsDMetricDescription]pmetric.ScopeMetrics) + p.counters = make(map[statsDMetricDescription]pmetric.ScopeMetrics) + p.timersAndDistributions = make([]pmetric.ScopeMetrics, 0) p.summaries = make(map[statsDMetricDescription]summaryMetric) p.observeHistogram = DefaultObserverType @@ -137,8 +137,8 @@ func (p *StatsDParser) Initialize(enableMetricType bool, isMonotonicCounter bool } // GetMetrics gets the metrics preparing for flushing and reset the state. -func (p *StatsDParser) GetMetrics() pdata.Metrics { - metrics := pdata.NewMetrics() +func (p *StatsDParser) GetMetrics() pmetric.Metrics { + metrics := pmetric.NewMetrics() rm := metrics.ResourceMetrics().AppendEmpty() for _, metric := range p.gauges { @@ -165,9 +165,9 @@ func (p *StatsDParser) GetMetrics() pdata.Metrics { } p.lastIntervalTime = timeNowFunc() - p.gauges = make(map[statsDMetricDescription]pdata.ScopeMetrics) - p.counters = make(map[statsDMetricDescription]pdata.ScopeMetrics) - p.timersAndDistributions = make([]pdata.ScopeMetrics, 0) + p.gauges = make(map[statsDMetricDescription]pmetric.ScopeMetrics) + p.counters = make(map[statsDMetricDescription]pmetric.ScopeMetrics) + p.timersAndDistributions = make([]pmetric.ScopeMetrics, 0) p.summaries = make(map[statsDMetricDescription]summaryMetric) return metrics } diff --git a/receiver/statsdreceiver/protocol/statsd_parser_test.go b/receiver/statsdreceiver/protocol/statsd_parser_test.go index 04bcb7d5a44e..108b3028cbe4 100644 --- a/receiver/statsdreceiver/protocol/statsd_parser_test.go +++ b/receiver/statsdreceiver/protocol/statsd_parser_test.go @@ -20,7 +20,7 @@ import ( "time" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/otel/attribute" ) @@ -490,9 +490,9 @@ func TestStatsDParser_Aggregate(t *testing.T) { tests := []struct { name string input []string - expectedGauges map[statsDMetricDescription]pdata.ScopeMetrics - expectedCounters map[statsDMetricDescription]pdata.ScopeMetrics - expectedTimer []pdata.ScopeMetrics + expectedGauges map[statsDMetricDescription]pmetric.ScopeMetrics + expectedCounters map[statsDMetricDescription]pmetric.ScopeMetrics + expectedTimer []pmetric.ScopeMetrics err error }{ { @@ -520,14 +520,14 @@ func TestStatsDParser_Aggregate(t *testing.T) { "statsdTestMetric2:+5|g|#mykey:myvalue", "statsdTestMetric2:+500|g|#mykey:myvalue", }, - expectedGauges: map[statsDMetricDescription]pdata.ScopeMetrics{ + expectedGauges: map[statsDMetricDescription]pmetric.ScopeMetrics{ testDescription("statsdTestMetric1", "g", []string{"mykey"}, 
[]string{"myvalue"}): buildGaugeMetric(testStatsDMetric("statsdTestMetric1", 10102, false, "g", 0, []string{"mykey"}, []string{"myvalue"}), time.Unix(711, 0)), testDescription("statsdTestMetric2", "g", []string{"mykey"}, []string{"myvalue"}): buildGaugeMetric(testStatsDMetric("statsdTestMetric2", 507, false, "g", 0, []string{"mykey"}, []string{"myvalue"}), time.Unix(711, 0)), }, - expectedCounters: map[statsDMetricDescription]pdata.ScopeMetrics{}, - expectedTimer: []pdata.ScopeMetrics{}, + expectedCounters: map[statsDMetricDescription]pmetric.ScopeMetrics{}, + expectedTimer: []pmetric.ScopeMetrics{}, }, { name: "gauge minus", @@ -543,14 +543,14 @@ func TestStatsDParser_Aggregate(t *testing.T) { "statsdTestMetric1:-100|g|#mykey:myvalue", "statsdTestMetric1:-1|g|#mykey:myvalue", }, - expectedGauges: map[statsDMetricDescription]pdata.ScopeMetrics{ + expectedGauges: map[statsDMetricDescription]pmetric.ScopeMetrics{ testDescription("statsdTestMetric1", "g", []string{"mykey"}, []string{"myvalue"}): buildGaugeMetric(testStatsDMetric("statsdTestMetric1", 4885, false, "g", 0, []string{"mykey"}, []string{"myvalue"}), time.Unix(711, 0)), testDescription("statsdTestMetric2", "g", []string{"mykey"}, []string{"myvalue"}): buildGaugeMetric(testStatsDMetric("statsdTestMetric2", 5, false, "g", 0, []string{"mykey"}, []string{"myvalue"}), time.Unix(711, 0)), }, - expectedCounters: map[statsDMetricDescription]pdata.ScopeMetrics{}, - expectedTimer: []pdata.ScopeMetrics{}, + expectedCounters: map[statsDMetricDescription]pmetric.ScopeMetrics{}, + expectedTimer: []pmetric.ScopeMetrics{}, }, { name: "gauge plus and minus", @@ -566,14 +566,14 @@ func TestStatsDParser_Aggregate(t *testing.T) { "statsdTestMetric2:-200|g|#mykey:myvalue", "statsdTestMetric2:200|g|#mykey:myvalue", }, - expectedGauges: map[statsDMetricDescription]pdata.ScopeMetrics{ + expectedGauges: map[statsDMetricDescription]pmetric.ScopeMetrics{ testDescription("statsdTestMetric1", "g", []string{"mykey"}, []string{"myvalue"}): buildGaugeMetric(testStatsDMetric("statsdTestMetric1", 4101, false, "g", 0, []string{"mykey"}, []string{"myvalue"}), time.Unix(711, 0)), testDescription("statsdTestMetric2", "g", []string{"mykey"}, []string{"myvalue"}): buildGaugeMetric(testStatsDMetric("statsdTestMetric2", 200, false, "g", 0, []string{"mykey"}, []string{"myvalue"}), time.Unix(711, 0)), }, - expectedCounters: map[statsDMetricDescription]pdata.ScopeMetrics{}, - expectedTimer: []pdata.ScopeMetrics{}, + expectedCounters: map[statsDMetricDescription]pmetric.ScopeMetrics{}, + expectedTimer: []pmetric.ScopeMetrics{}, }, { name: "counter with increment and sample rate", @@ -583,14 +583,14 @@ func TestStatsDParser_Aggregate(t *testing.T) { "statsdTestMetric2:20|c|@0.8|#mykey:myvalue", "statsdTestMetric2:20|c|@0.8|#mykey:myvalue", }, - expectedGauges: map[statsDMetricDescription]pdata.ScopeMetrics{}, - expectedCounters: map[statsDMetricDescription]pdata.ScopeMetrics{ + expectedGauges: map[statsDMetricDescription]pmetric.ScopeMetrics{}, + expectedCounters: map[statsDMetricDescription]pmetric.ScopeMetrics{ testDescription("statsdTestMetric1", "c", []string{"mykey"}, []string{"myvalue"}): buildCounterMetric(testStatsDMetric("statsdTestMetric1", 7000, false, "c", 0, []string{"mykey"}, []string{"myvalue"}), false, time.Unix(711, 0), time.Unix(611, 0)), testDescription("statsdTestMetric2", "c", []string{"mykey"}, []string{"myvalue"}): buildCounterMetric(testStatsDMetric("statsdTestMetric2", 50, false, "c", 0, []string{"mykey"}, []string{"myvalue"}), false, time.Unix(711, 0), 
time.Unix(611, 0)), }, - expectedTimer: []pdata.ScopeMetrics{}, + expectedTimer: []pmetric.ScopeMetrics{}, }, { name: "counter and gauge: one gauge and two counters", @@ -605,17 +605,17 @@ func TestStatsDParser_Aggregate(t *testing.T) { "statsdTestMetric1:+2|g|#mykey:myvalue", "statsdTestMetric2:20|c|@0.8|#mykey:myvalue", }, - expectedGauges: map[statsDMetricDescription]pdata.ScopeMetrics{ + expectedGauges: map[statsDMetricDescription]pmetric.ScopeMetrics{ testDescription("statsdTestMetric1", "g", []string{"mykey"}, []string{"myvalue"}): buildGaugeMetric(testStatsDMetric("statsdTestMetric1", 421, false, "g", 0, []string{"mykey"}, []string{"myvalue"}), time.Unix(711, 0)), }, - expectedCounters: map[statsDMetricDescription]pdata.ScopeMetrics{ + expectedCounters: map[statsDMetricDescription]pmetric.ScopeMetrics{ testDescription("statsdTestMetric1", "c", []string{"mykey"}, []string{"myvalue"}): buildCounterMetric(testStatsDMetric("statsdTestMetric1", 7000, false, "c", 0, []string{"mykey"}, []string{"myvalue"}), false, time.Unix(711, 0), time.Unix(611, 0)), testDescription("statsdTestMetric2", "c", []string{"mykey"}, []string{"myvalue"}): buildCounterMetric(testStatsDMetric("statsdTestMetric2", 50, false, "c", 0, []string{"mykey"}, []string{"myvalue"}), false, time.Unix(711, 0), time.Unix(611, 0)), }, - expectedTimer: []pdata.ScopeMetrics{}, + expectedTimer: []pmetric.ScopeMetrics{}, }, { name: "counter and gauge: 2 gauges and 2 counters", @@ -631,19 +631,19 @@ func TestStatsDParser_Aggregate(t *testing.T) { "statsdTestMetric1:15|c|#mykey:myvalue", "statsdTestMetric2:5|c|@0.2|#mykey:myvalue", }, - expectedGauges: map[statsDMetricDescription]pdata.ScopeMetrics{ + expectedGauges: map[statsDMetricDescription]pmetric.ScopeMetrics{ testDescription("statsdTestMetric1", "g", []string{"mykey"}, []string{"myvalue"}): buildGaugeMetric(testStatsDMetric("statsdTestMetric1", 319, false, "g", 0, []string{"mykey"}, []string{"myvalue"}), time.Unix(711, 0)), testDescription("statsdTestMetric1", "g", []string{"mykey"}, []string{"myvalue1"}): buildGaugeMetric(testStatsDMetric("statsdTestMetric1", 399, false, "g", 0, []string{"mykey"}, []string{"myvalue1"}), time.Unix(711, 0)), }, - expectedCounters: map[statsDMetricDescription]pdata.ScopeMetrics{ + expectedCounters: map[statsDMetricDescription]pmetric.ScopeMetrics{ testDescription("statsdTestMetric1", "c", []string{"mykey"}, []string{"myvalue"}): buildCounterMetric(testStatsDMetric("statsdTestMetric1", 215, false, "c", 0, []string{"mykey"}, []string{"myvalue"}), false, time.Unix(711, 0), time.Unix(611, 0)), testDescription("statsdTestMetric2", "c", []string{"mykey"}, []string{"myvalue"}): buildCounterMetric(testStatsDMetric("statsdTestMetric2", 75, false, "c", 0, []string{"mykey"}, []string{"myvalue"}), false, time.Unix(711, 0), time.Unix(611, 0)), }, - expectedTimer: []pdata.ScopeMetrics{}, + expectedTimer: []pmetric.ScopeMetrics{}, }, { name: "counter and gauge: 2 timings and 2 histograms", @@ -653,9 +653,9 @@ func TestStatsDParser_Aggregate(t *testing.T) { "statsdTestMetric1:300|ms|#mykey:myvalue", "statsdTestMetric1:10|h|@0.1|#mykey:myvalue", }, - expectedGauges: map[statsDMetricDescription]pdata.ScopeMetrics{}, - expectedCounters: map[statsDMetricDescription]pdata.ScopeMetrics{}, - expectedTimer: []pdata.ScopeMetrics{ + expectedGauges: map[statsDMetricDescription]pmetric.ScopeMetrics{}, + expectedCounters: map[statsDMetricDescription]pmetric.ScopeMetrics{}, + expectedTimer: []pmetric.ScopeMetrics{ buildGaugeMetric(testStatsDMetric("statsdTestMetric1", 500, 
false, "ms", 0, []string{"mykey"}, []string{"myvalue"}), time.Unix(711, 0)), buildGaugeMetric(testStatsDMetric("statsdTestMetric1", 400, false, "h", 0, []string{"mykey"}, []string{"myvalue"}), time.Unix(711, 0)), buildGaugeMetric(testStatsDMetric("statsdTestMetric1", 300, false, "ms", 0, []string{"mykey"}, []string{"myvalue"}), time.Unix(711, 0)), @@ -691,8 +691,8 @@ func TestStatsDParser_AggregateWithMetricType(t *testing.T) { tests := []struct { name string input []string - expectedGauges map[statsDMetricDescription]pdata.ScopeMetrics - expectedCounters map[statsDMetricDescription]pdata.ScopeMetrics + expectedGauges map[statsDMetricDescription]pmetric.ScopeMetrics + expectedCounters map[statsDMetricDescription]pmetric.ScopeMetrics err error }{ { @@ -706,13 +706,13 @@ func TestStatsDParser_AggregateWithMetricType(t *testing.T) { "statsdTestMetric2:+5|g|#mykey:myvalue", "statsdTestMetric2:+500|g|#mykey:myvalue", }, - expectedGauges: map[statsDMetricDescription]pdata.ScopeMetrics{ + expectedGauges: map[statsDMetricDescription]pmetric.ScopeMetrics{ testDescription("statsdTestMetric1", "g", []string{"mykey", "metric_type"}, []string{"myvalue", "gauge"}): buildGaugeMetric(testStatsDMetric("statsdTestMetric1", 10102, false, "g", 0, []string{"mykey", "metric_type"}, []string{"myvalue", "gauge"}), time.Unix(711, 0)), testDescription("statsdTestMetric2", "g", []string{"mykey", "metric_type"}, []string{"myvalue", "gauge"}): buildGaugeMetric(testStatsDMetric("statsdTestMetric2", 507, false, "g", 0, []string{"mykey", "metric_type"}, []string{"myvalue", "gauge"}), time.Unix(711, 0)), }, - expectedCounters: map[statsDMetricDescription]pdata.ScopeMetrics{}, + expectedCounters: map[statsDMetricDescription]pmetric.ScopeMetrics{}, }, { @@ -723,8 +723,8 @@ func TestStatsDParser_AggregateWithMetricType(t *testing.T) { "statsdTestMetric2:20|c|@0.8|#mykey:myvalue", "statsdTestMetric2:20|c|@0.8|#mykey:myvalue", }, - expectedGauges: map[statsDMetricDescription]pdata.ScopeMetrics{}, - expectedCounters: map[statsDMetricDescription]pdata.ScopeMetrics{ + expectedGauges: map[statsDMetricDescription]pmetric.ScopeMetrics{}, + expectedCounters: map[statsDMetricDescription]pmetric.ScopeMetrics{ testDescription("statsdTestMetric1", "c", []string{"mykey", "metric_type"}, []string{"myvalue", "counter"}): buildCounterMetric(testStatsDMetric("statsdTestMetric1", 7000, false, "c", 0, []string{"mykey", "metric_type"}, []string{"myvalue", "counter"}), false, time.Unix(711, 0), time.Unix(611, 0)), testDescription("statsdTestMetric2", "c", @@ -759,8 +759,8 @@ func TestStatsDParser_AggregateWithIsMonotonicCounter(t *testing.T) { tests := []struct { name string input []string - expectedGauges map[statsDMetricDescription]pdata.ScopeMetrics - expectedCounters map[statsDMetricDescription]pdata.ScopeMetrics + expectedGauges map[statsDMetricDescription]pmetric.ScopeMetrics + expectedCounters map[statsDMetricDescription]pmetric.ScopeMetrics err error }{ { @@ -771,8 +771,8 @@ func TestStatsDParser_AggregateWithIsMonotonicCounter(t *testing.T) { "statsdTestMetric2:20|c|@0.8|#mykey:myvalue", "statsdTestMetric2:20|c|@0.8|#mykey:myvalue", }, - expectedGauges: map[statsDMetricDescription]pdata.ScopeMetrics{}, - expectedCounters: map[statsDMetricDescription]pdata.ScopeMetrics{ + expectedGauges: map[statsDMetricDescription]pmetric.ScopeMetrics{}, + expectedCounters: map[statsDMetricDescription]pmetric.ScopeMetrics{ testDescription("statsdTestMetric1", "c", []string{"mykey"}, []string{"myvalue"}): 
buildCounterMetric(testStatsDMetric("statsdTestMetric1", 7000, false, "c", 0, []string{"mykey"}, []string{"myvalue"}), true, time.Unix(711, 0), time.Unix(611, 0)), testDescription("statsdTestMetric2", "c", @@ -900,7 +900,7 @@ func TestStatsDParser_Initialize(t *testing.T) { name: "test", metricType: "g", attrs: *attribute.EmptySet()} - p.gauges[teststatsdDMetricdescription] = pdata.ScopeMetrics{} + p.gauges[teststatsdDMetricdescription] = pmetric.ScopeMetrics{} assert.Equal(t, 1, len(p.gauges)) assert.Equal(t, GaugeObserver, p.observeTimer) assert.Equal(t, GaugeObserver, p.observeHistogram) diff --git a/receiver/statsdreceiver/receiver.go b/receiver/statsdreceiver/receiver.go index 59f55a1945d3..d8147579d2e2 100644 --- a/receiver/statsdreceiver/receiver.go +++ b/receiver/statsdreceiver/receiver.go @@ -25,7 +25,7 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/component/componenterror" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/protocol" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/transport" @@ -125,7 +125,7 @@ func (r *statsdReceiver) Shutdown(context.Context) error { return err } -func (r *statsdReceiver) Flush(ctx context.Context, metrics pdata.Metrics, nextConsumer consumer.Metrics) error { +func (r *statsdReceiver) Flush(ctx context.Context, metrics pmetric.Metrics, nextConsumer consumer.Metrics) error { error := nextConsumer.ConsumeMetrics(ctx, metrics) if error != nil { return error diff --git a/receiver/statsdreceiver/receiver_test.go b/receiver/statsdreceiver/receiver_test.go index bdb647df447b..898ed5000616 100644 --- a/receiver/statsdreceiver/receiver_test.go +++ b/receiver/statsdreceiver/receiver_test.go @@ -30,7 +30,7 @@ import ( "go.opentelemetry.io/collector/config/confignet" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumertest" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/common/testutil" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver/transport" @@ -85,7 +85,7 @@ func TestStatsdReceiver_Flush(t *testing.T) { rcv, err := New(componenttest.NewNopReceiverCreateSettings(), *cfg, nextConsumer) assert.NoError(t, err) r := rcv.(*statsdReceiver) - var metrics = pdata.NewMetrics() + var metrics = pmetric.NewMetrics() assert.Nil(t, r.Flush(ctx, metrics, nextConsumer)) r.Start(ctx, componenttest.NewNopHost()) r.Shutdown(ctx) @@ -155,7 +155,7 @@ func Test_statsdreceiver_EndToEnd(t *testing.T) { require.Equal(t, 1, mdd[0].ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().Len()) metric := mdd[0].ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0) assert.Equal(t, statsdMetric.Name, metric.Name()) - assert.Equal(t, pdata.MetricDataTypeSum, metric.DataType()) + assert.Equal(t, pmetric.MetricDataTypeSum, metric.DataType()) require.Equal(t, 1, metric.Sum().DataPoints().Len()) }) } diff --git a/receiver/syslogreceiver/go.mod b/receiver/syslogreceiver/go.mod index fe42883717c6..5236d9e34f82 100644 --- a/receiver/syslogreceiver/go.mod +++ b/receiver/syslogreceiver/go.mod @@ -6,8 +6,8 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/stanza v0.48.0 
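The statsd receiver hunks above capture the shape of the migration: the single `go.opentelemetry.io/collector/model/pdata` import is split into `pdata/pmetric` (metric payloads) and `pdata/pcommon` (shared types such as timestamps), while the call sites keep the same method names. As a rough sketch of code written against the new packages — the helper name `buildExampleSum`, the literal values, and the data-point setter usage are illustrative additions, not taken from this diff — consider:

```go
package main

import (
	"fmt"
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"
)

// buildExampleSum is a hypothetical helper showing the pmetric/pcommon API
// surface exercised by this change: NewMetrics, AppendEmpty, SetDataType,
// and the Sum accessors.
func buildExampleSum() pmetric.Metrics {
	md := pmetric.NewMetrics()
	sm := md.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty()

	m := sm.Metrics().AppendEmpty()
	m.SetName("statsdTestMetric1")
	m.SetUnit("1")
	m.SetDataType(pmetric.MetricDataTypeSum)
	m.Sum().SetIsMonotonic(true)
	m.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)

	dp := m.Sum().DataPoints().AppendEmpty()
	dp.SetTimestamp(pcommon.NewTimestampFromTime(time.Now()))
	dp.SetDoubleVal(7000) // SetDoubleVal is the v0.48-era setter name (assumption)

	return md
}

func main() {
	md := buildExampleSum()
	fmt.Println(md.MetricCount()) // 1
}
```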
github.com/open-telemetry/opentelemetry-log-collection v0.29.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 gopkg.in/yaml.v2 v2.4.0 ) @@ -18,7 +18,7 @@ require ( github.com/influxdata/go-syslog/v3 v3.0.1-0.20210608084020-ac565dc76ba6 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect @@ -27,7 +27,6 @@ require ( github.com/observiq/ctimefmt v1.0.0 // indirect github.com/pelletier/go-toml v1.9.4 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect @@ -43,3 +42,5 @@ require ( replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/stanza => ../../internal/stanza replace github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage => ../../extension/storage + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/receiver/syslogreceiver/go.sum b/receiver/syslogreceiver/go.sum index 9472fb70fb3d..daa60cc7a2ad 100644 --- a/receiver/syslogreceiver/go.sum +++ b/receiver/syslogreceiver/go.sum @@ -19,7 +19,7 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= @@ -76,7 +76,6 @@ github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= 
@@ -112,8 +111,8 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -173,8 +172,6 @@ github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/sanity-io/litter v1.2.0/go.mod h1:JF6pZUFgu2Q0sBZ+HSV35P8TVPI1TTzEwyu9FXAw2W4= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -193,17 +190,17 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1 go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 
h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= diff --git a/receiver/syslogreceiver/syslog_test.go b/receiver/syslogreceiver/syslog_test.go index 8c13b6b5ec7e..c61ff91ab198 100644 --- a/receiver/syslogreceiver/syslog_test.go +++ b/receiver/syslogreceiver/syslog_test.go @@ -27,7 +27,7 @@ import ( "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/consumer/consumertest" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/service/servicetest" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/stanza" @@ -76,7 +76,7 @@ func testSyslog(t *testing.T, cfg *SysLogConfig) { for i := 0; i < numLogs; i++ { log := logs.At(i) - require.Equal(t, log.Timestamp(), pdata.Timestamp(1614470402003000000+i*60*1000*1000*1000)) + require.Equal(t, log.Timestamp(), pcommon.Timestamp(1614470402003000000+i*60*1000*1000*1000)) msg, ok := log.Attributes().AsRaw()["message"] require.True(t, ok) require.Equal(t, msg, fmt.Sprintf("test msg %d", i)) diff --git a/receiver/tcplogreceiver/go.mod b/receiver/tcplogreceiver/go.mod index bdf2e93815a9..ffee874f74ec 100644 --- a/receiver/tcplogreceiver/go.mod +++ b/receiver/tcplogreceiver/go.mod @@ -6,7 +6,7 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/stanza v0.48.0 github.com/open-telemetry/opentelemetry-log-collection v0.29.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d gopkg.in/yaml.v2 v2.4.0 ) @@ -16,7 +16,7 @@ require ( github.com/gogo/protobuf v1.3.2 // indirect github.com/jpillora/backoff v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect @@ -25,9 +25,8 @@ require ( github.com/observiq/ctimefmt v1.0.0 // indirect github.com/pelletier/go-toml v1.9.4 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opencensus.io v0.23.0 // indirect - go.opentelemetry.io/collector/model v0.48.0 // indirect + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect @@ -42,3 +41,5 @@ require ( replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/stanza => ../../internal/stanza replace github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage => ../../extension/storage + +replace go.opentelemetry.io/collector/pdata => 
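The syslog receiver test above only changes which package the `Timestamp` type comes from; the underlying representation is still UNIX nanoseconds. A minimal sketch of two equivalent ways to build one under the new `pcommon` package follows — the nanosecond value is the one used in the test, everything else is illustrative:

```go
package main

import (
	"fmt"
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
)

func main() {
	// Direct conversion from UNIX nanoseconds, as in syslog_test.go.
	fromNanos := pcommon.Timestamp(1614470402003000000)

	// Equivalent construction from a time.Time value.
	fromTime := pcommon.NewTimestampFromTime(time.Unix(0, 1614470402003000000))

	fmt.Println(fromNanos == fromTime)                       // true
	fmt.Println(fromTime.AsTime().Format(time.RFC3339Nano))  // human-readable form
}
```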
go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/receiver/tcplogreceiver/go.sum b/receiver/tcplogreceiver/go.sum index c71f8c167872..daa4dc4162e0 100644 --- a/receiver/tcplogreceiver/go.sum +++ b/receiver/tcplogreceiver/go.sum @@ -19,7 +19,7 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= @@ -76,7 +76,6 @@ github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -110,8 +109,8 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -170,8 +169,6 @@ github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/sanity-io/litter v1.2.0/go.mod h1:JF6pZUFgu2Q0sBZ+HSV35P8TVPI1TTzEwyu9FXAw2W4= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod 
h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -190,17 +187,17 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1 go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= diff --git a/receiver/udplogreceiver/go.mod b/receiver/udplogreceiver/go.mod index 61bf32d95b61..20734de9fa86 100644 --- a/receiver/udplogreceiver/go.mod +++ b/receiver/udplogreceiver/go.mod @@ -6,7 +6,7 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/stanza v0.48.0 github.com/open-telemetry/opentelemetry-log-collection v0.29.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d gopkg.in/yaml.v2 v2.4.0 ) @@ -15,7 +15,7 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect 
github.com/mitchellh/reflectwalk v1.0.2 // indirect @@ -24,9 +24,8 @@ require ( github.com/observiq/ctimefmt v1.0.0 // indirect github.com/pelletier/go-toml v1.9.4 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opencensus.io v0.23.0 // indirect - go.opentelemetry.io/collector/model v0.48.0 // indirect + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect @@ -41,3 +40,5 @@ require ( replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/stanza => ../../internal/stanza replace github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage => ../../extension/storage + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/receiver/udplogreceiver/go.sum b/receiver/udplogreceiver/go.sum index fb22fe74bab2..d5e5d6470709 100644 --- a/receiver/udplogreceiver/go.sum +++ b/receiver/udplogreceiver/go.sum @@ -19,7 +19,7 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= @@ -76,7 +76,6 @@ github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -108,8 +107,8 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 
h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -168,8 +167,6 @@ github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/sanity-io/litter v1.2.0/go.mod h1:JF6pZUFgu2Q0sBZ+HSV35P8TVPI1TTzEwyu9FXAw2W4= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -188,17 +185,17 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1 go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= diff --git a/receiver/wavefrontreceiver/go.mod 
b/receiver/wavefrontreceiver/go.mod index 83a2b9d2f1ef..977a5d2c5570 100644 --- a/receiver/wavefrontreceiver/go.mod +++ b/receiver/wavefrontreceiver/go.mod @@ -9,7 +9,7 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/receiver/carbonreceiver v0.48.0 github.com/open-telemetry/opentelemetry-collector-contrib/receiver/collectdreceiver v0.48.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d google.golang.org/protobuf v1.28.0 ) @@ -18,15 +18,15 @@ require ( github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.2 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.48.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opencensus.io v0.23.0 // indirect - go.opentelemetry.io/collector/model v0.48.0 // indirect + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d // indirect + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect @@ -46,3 +46,5 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/common => ../../internal/common replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal => ../../internal/coreinternal + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/receiver/wavefrontreceiver/go.sum b/receiver/wavefrontreceiver/go.sum index 37f06080e186..c9acb00e1813 100644 --- a/receiver/wavefrontreceiver/go.sum +++ b/receiver/wavefrontreceiver/go.sum @@ -15,7 +15,7 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.3.0 h1:t/LhUZLVitR1Ow2YOnduCsavhwFUklBMoGVYUCqmCqk= github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -74,7 +74,6 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp 
v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -105,8 +104,8 @@ github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqx github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -147,8 +146,6 @@ github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBO github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -163,17 +160,19 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= 
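Several of these go.mod files reference the new pdata module through the all-zero placeholder version `v0.0.0-00010101000000-000000000000` and then pin the real commit with a `replace` directive: since `go.opentelemetry.io/collector/pdata` had no tagged release at this point, the pseudo-version for commit 8eb68f40028d is what actually resolves. A minimal sketch of the pattern, assuming a hypothetical consumer module path:

```
module github.com/example/somereceiver // hypothetical consumer module

go 1.17

require (
	go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d
	go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000
)

// Pin the unreleased pdata module to the same collector commit.
replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d
```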
+go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -207,7 +206,7 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -229,7 +228,7 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= diff --git a/receiver/windowsperfcountersreceiver/go.mod b/receiver/windowsperfcountersreceiver/go.mod index 7bf7cc43fa8a..ffd2c2a2f4f2 100644 --- a/receiver/windowsperfcountersreceiver/go.mod +++ b/receiver/windowsperfcountersreceiver/go.mod @@ -5,8 +5,7 @@ go 1.17 require 
( github.com/open-telemetry/opentelemetry-collector-contrib/pkg/winperfcounters v0.48.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d go.uber.org/multierr v1.8.0 go.uber.org/zap v1.21.0 ) @@ -15,8 +14,7 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/fsnotify/fsnotify v1.5.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/protobuf v1.5.2 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kr/text v0.2.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect @@ -24,23 +22,20 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/scrapertest v0.48.0 github.com/pelletier/go-toml v1.9.4 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opencensus.io v0.23.0 // indirect + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect - golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d // indirect - golang.org/x/text v0.3.7 // indirect - google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect - google.golang.org/grpc v1.45.0 // indirect - google.golang.org/protobuf v1.28.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) -require golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect +require golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 // indirect replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/scrapertest => ../../internal/scrapertest replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/winperfcounters => ../../pkg/winperfcounters + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/receiver/windowsperfcountersreceiver/go.sum b/receiver/windowsperfcountersreceiver/go.sum index 7ccd183f4ac4..a1f60d78dff5 100644 --- a/receiver/windowsperfcountersreceiver/go.sum +++ b/receiver/windowsperfcountersreceiver/go.sum @@ -1,8 +1,5 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= @@ -18,18 +15,10 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm 
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -37,9 +26,6 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= @@ -47,7 +33,6 @@ github.com/felixge/httpsnoop v1.0.2 h1:+nS9g82KMXccJ/wp0zyRW9ZBHFETmMGtkk+2CTTrW github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= github.com/fsnotify/fsnotify 
v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= @@ -64,18 +49,14 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -85,13 +66,10 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= 
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -121,8 +99,8 @@ github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqx github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -161,20 +139,15 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rhnvrm/simples3 v0.6.1/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.6.2 h1:aIihoIOHCiLZHxyoNQ+ABL4NKhFTgKLBdMLyEAh98m0= github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= @@ -184,20 +157,19 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 
h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -221,20 +193,16 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net 
v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -250,22 +218,19 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -286,22 +251,16 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -311,18 +270,12 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 
v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= diff --git a/receiver/windowsperfcountersreceiver/windowsperfcounters_scraper.go b/receiver/windowsperfcountersreceiver/windowsperfcounters_scraper.go index 2c164101cd47..7f34e2cadd65 100644 --- a/receiver/windowsperfcountersreceiver/windowsperfcounters_scraper.go +++ b/receiver/windowsperfcountersreceiver/windowsperfcounters_scraper.go @@ -22,7 +22,8 @@ import ( "time" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/multierr" "go.uber.org/zap" @@ -70,14 +71,14 @@ func (s *scraper) shutdown(context.Context) error { return errs } -func (s *scraper) scrape(context.Context) (pdata.Metrics, error) { - md := pdata.NewMetrics() +func (s *scraper) scrape(context.Context) (pmetric.Metrics, error) { + md := pmetric.NewMetrics() metricSlice := md.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty().Metrics() - now := pdata.NewTimestampFromTime(time.Now()) + now := pcommon.NewTimestampFromTime(time.Now()) var errs error metricSlice.EnsureCapacity(len(s.watchers)) - metrics := map[string]pdata.Metric{} + metrics := map[string]pmetric.Metric{} for name, metricCfg := range s.cfg.MetricMetaData { builtMetric := metricSlice.AppendEmpty() @@ -86,17 +87,17 @@ func (s *scraper) scrape(context.Context) (pdata.Metrics, error) { builtMetric.SetUnit(metricCfg.Unit) if (metricCfg.Sum != SumMetric{}) { - builtMetric.SetDataType(pdata.MetricDataTypeSum) + builtMetric.SetDataType(pmetric.MetricDataTypeSum) builtMetric.Sum().SetIsMonotonic(metricCfg.Sum.Monotonic) switch metricCfg.Sum.Aggregation { case "cumulative": - builtMetric.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + builtMetric.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) case "delta": - builtMetric.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityDelta) + builtMetric.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityDelta) } } else { - builtMetric.SetDataType(pdata.MetricDataTypeGauge) + builtMetric.SetDataType(pmetric.MetricDataTypeGauge) } metrics[name] = builtMetric @@ -113,13 +114,13 @@ func (s *scraper) scrape(context.Context) (pdata.Metrics, error) { } for _, scrapedValue := range counterVals { - var metric pdata.Metric + var metric pmetric.Metric metricRep := scrapedValue.MetricRep if builtmetric, ok := metrics[metricRep.Name]; ok { metric = builtmetric } else { metric = metricSlice.AppendEmpty() - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) metric.SetName(metricRep.Name) metric.SetUnit("1") } @@ -130,10 +131,10 @@ func (s *scraper) scrape(context.Context) (pdata.Metrics, error) { return md, errs } -func initializeMetricDps(metric pdata.Metric, now pdata.Timestamp, counterValue float64, attributes map[string]string) { - var dps pdata.NumberDataPointSlice +func initializeMetricDps(metric 
pmetric.Metric, now pcommon.Timestamp, counterValue float64, attributes map[string]string) { + var dps pmetric.NumberDataPointSlice - if metric.DataType() == pdata.MetricDataTypeGauge { + if metric.DataType() == pmetric.MetricDataTypeGauge { dps = metric.Gauge().DataPoints() } else { dps = metric.Sum().DataPoints() diff --git a/receiver/zipkinreceiver/go.mod b/receiver/zipkinreceiver/go.mod index eee0ae53a212..0f22da23f6d9 100644 --- a/receiver/zipkinreceiver/go.mod +++ b/receiver/zipkinreceiver/go.mod @@ -8,8 +8,9 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.48.0 github.com/openzipkin/zipkin-go v0.4.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 google.golang.org/protobuf v1.28.0 ) @@ -25,7 +26,7 @@ require ( github.com/golang/protobuf v1.5.2 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/klauspost/compress v1.15.1 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect @@ -33,7 +34,6 @@ require ( github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rs/cors v1.8.2 // indirect - github.com/spf13/cast v1.4.1 // indirect github.com/uber/jaeger-client-go v2.30.0+incompatible // indirect github.com/uber/jaeger-lib v2.4.1+incompatible // indirect go.opencensus.io v0.23.0 // indirect @@ -44,6 +44,8 @@ require ( go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect go.uber.org/zap v1.21.0 // indirect + golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect + golang.org/x/text v0.3.7 // indirect google.golang.org/grpc v1.45.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect @@ -54,3 +56,5 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/corei replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin => ../../pkg/translator/zipkin replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/opencensus => ../../pkg/translator/opencensus + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/receiver/zipkinreceiver/go.sum b/receiver/zipkinreceiver/go.sum index cab67db5b119..2c6f6cd0c44e 100644 --- a/receiver/zipkinreceiver/go.sum +++ b/receiver/zipkinreceiver/go.sum @@ -22,7 +22,7 @@ github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAm github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= 
+github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.3.0 h1:t/LhUZLVitR1Ow2YOnduCsavhwFUklBMoGVYUCqmCqk= github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -104,7 +104,6 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= @@ -151,8 +150,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= @@ -216,8 +215,6 @@ github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFo github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -243,10 +240,12 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 
h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.31.0 h1:li8u9OSMvLau7rMs8bmiL82OazG6MAkwPz2i6eS8TBQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0 h1:woM+Mb4d0A+Dxa3rYPenSN5ZeS9qHUvE8rlObiLRXTY= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.31.0/go.mod h1:PFmBsWbldL1kiWZk9+0LBZz2brhByaGsvp6pRICMlPE= @@ -257,7 +256,7 @@ go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1 go.opentelemetry.io/otel/metric v0.28.0/go.mod h1:TrzsfQAmQaB1PDcdhBauLMk7nyyg9hm+GoQq/ekE9Iw= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.0/go.mod h1:qs7BrU5cZ8dXQHBGxHMOxwME/27YH2qEp4/+tZLLwJE= go.opentelemetry.io/otel/trace v1.6.1/go.mod h1:RkFRM1m0puWIq10oxImnGEduNBzxiN7TXluRBtE+5j0= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= @@ -304,7 +303,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -335,7 +335,7 @@ golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= diff --git a/receiver/zipkinreceiver/proto_parse_test.go b/receiver/zipkinreceiver/proto_parse_test.go index 146a42d81dbc..4f0e7aca69ce 100644 --- a/receiver/zipkinreceiver/proto_parse_test.go +++ b/receiver/zipkinreceiver/proto_parse_test.go @@ -22,8 +22,9 @@ import ( "github.com/openzipkin/zipkin-go/proto/zipkin_proto3" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" "google.golang.org/protobuf/proto" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/tracetranslator" @@ -107,18 +108,18 @@ func TestConvertSpansToTraceSpans_protobuf(t *testing.T) { require.NoError(t, err, "Failed to parse convert Zipkin spans in Protobuf to Trace spans: %v", err) require.Equal(t, reqs.ResourceSpans().Len(), 2, "Expecting exactly 2 requests since spans have different node/localEndpoint: %v", reqs.ResourceSpans().Len()) - want := pdata.NewTraces() + want := ptrace.NewTraces() want.ResourceSpans().EnsureCapacity(2) // First span/resource want.ResourceSpans().AppendEmpty().Resource().Attributes().UpsertString(conventions.AttributeServiceName, "svc-1") span0 := want.ResourceSpans().At(0).ScopeSpans().AppendEmpty().Spans().AppendEmpty() - span0.SetTraceID(pdata.NewTraceID([16]byte{0x7F, 0x6F, 0x5F, 0x4F, 0x3F, 0x2F, 0x1F, 0x0F, 0xF7, 0xF6, 0xF5, 0xF4, 0xF3, 0xF2, 0xF1, 0xF0})) - span0.SetSpanID(pdata.NewSpanID([8]byte{0xF7, 0xF6, 0xF5, 0xF4, 0xF3, 0xF2, 0xF1, 0xF0})) - span0.SetParentSpanID(pdata.NewSpanID([8]byte{0xF7, 0xF6, 0xF5, 0xF4, 0xF3, 0xF2, 0xF1, 0xF0})) + span0.SetTraceID(pcommon.NewTraceID([16]byte{0x7F, 0x6F, 0x5F, 0x4F, 0x3F, 0x2F, 0x1F, 0x0F, 0xF7, 0xF6, 0xF5, 0xF4, 0xF3, 0xF2, 0xF1, 0xF0})) + span0.SetSpanID(pcommon.NewSpanID([8]byte{0xF7, 0xF6, 0xF5, 0xF4, 0xF3, 0xF2, 0xF1, 0xF0})) + span0.SetParentSpanID(pcommon.NewSpanID([8]byte{0xF7, 0xF6, 0xF5, 0xF4, 0xF3, 0xF2, 0xF1, 0xF0})) span0.SetName("ProtoSpan1") - span0.SetStartTimestamp(pdata.NewTimestampFromTime(now)) - span0.SetEndTimestamp(pdata.NewTimestampFromTime(now.Add(12 * time.Second))) + span0.SetStartTimestamp(pcommon.NewTimestampFromTime(now)) + span0.SetEndTimestamp(pcommon.NewTimestampFromTime(now.Add(12 * time.Second))) span0.Attributes().UpsertString(conventions.AttributeNetHostIP, "192.168.0.1") span0.Attributes().UpsertInt(conventions.AttributeNetHostPort, 8009) span0.Attributes().UpsertString(conventions.AttributeNetPeerName, "memcached") @@ -129,12 +130,12 @@ func TestConvertSpansToTraceSpans_protobuf(t *testing.T) { // Second span/resource 
want.ResourceSpans().AppendEmpty().Resource().Attributes().UpsertString(conventions.AttributeServiceName, "search") span1 := want.ResourceSpans().At(1).ScopeSpans().AppendEmpty().Spans().AppendEmpty() - span1.SetTraceID(pdata.NewTraceID([16]byte{0x7A, 0x6A, 0x5A, 0x4A, 0x3A, 0x2A, 0x1A, 0x0A, 0xC7, 0xC6, 0xC5, 0xC4, 0xC3, 0xC2, 0xC1, 0xC0})) - span1.SetSpanID(pdata.NewSpanID([8]byte{0x67, 0x66, 0x65, 0x64, 0x63, 0x62, 0x61, 0x60})) - span1.SetParentSpanID(pdata.NewSpanID([8]byte{0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10})) + span1.SetTraceID(pcommon.NewTraceID([16]byte{0x7A, 0x6A, 0x5A, 0x4A, 0x3A, 0x2A, 0x1A, 0x0A, 0xC7, 0xC6, 0xC5, 0xC4, 0xC3, 0xC2, 0xC1, 0xC0})) + span1.SetSpanID(pcommon.NewSpanID([8]byte{0x67, 0x66, 0x65, 0x64, 0x63, 0x62, 0x61, 0x60})) + span1.SetParentSpanID(pcommon.NewSpanID([8]byte{0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10})) span1.SetName("CacheWarmUp") - span1.SetStartTimestamp(pdata.NewTimestampFromTime(now.Add(-10 * time.Hour))) - span1.SetEndTimestamp(pdata.NewTimestampFromTime(now.Add(-10 * time.Hour).Add(7 * time.Second))) + span1.SetStartTimestamp(pcommon.NewTimestampFromTime(now.Add(-10 * time.Hour))) + span1.SetEndTimestamp(pcommon.NewTimestampFromTime(now.Add(-10 * time.Hour).Add(7 * time.Second))) span1.Attributes().UpsertString(conventions.AttributeNetHostIP, "10.0.0.13") span1.Attributes().UpsertInt(conventions.AttributeNetHostPort, 8009) span1.Attributes().UpsertString(conventions.AttributeNetPeerName, "redis") @@ -143,9 +144,9 @@ func TestConvertSpansToTraceSpans_protobuf(t *testing.T) { span1.Attributes().UpsertString(tracetranslator.TagSpanKind, string(tracetranslator.OpenTracingSpanKindProducer)) span1.Events().EnsureCapacity(2) span1.Events().AppendEmpty().SetName("DB reset") - span1.Events().At(0).SetTimestamp(pdata.NewTimestampFromTime(now.Add(-10 * time.Hour))) + span1.Events().At(0).SetTimestamp(pcommon.NewTimestampFromTime(now.Add(-10 * time.Hour))) span1.Events().AppendEmpty().SetName("GC Cycle 39") - span1.Events().At(1).SetTimestamp(pdata.NewTimestampFromTime(now.Add(-10 * time.Hour))) + span1.Events().At(1).SetTimestamp(pcommon.NewTimestampFromTime(now.Add(-10 * time.Hour))) assert.Equal(t, want.SpanCount(), reqs.SpanCount()) assert.Equal(t, want.ResourceSpans().Len(), reqs.ResourceSpans().Len()) @@ -174,7 +175,7 @@ func newTestZipkinReceiver() *zipkinReceiver { } } -func compareResourceSpans(t *testing.T, wantRS pdata.ResourceSpans, reqsRS pdata.ResourceSpans) { +func compareResourceSpans(t *testing.T, wantRS ptrace.ResourceSpans, reqsRS ptrace.ResourceSpans) { assert.Equal(t, wantRS.ScopeSpans().Len(), reqsRS.ScopeSpans().Len()) wantIL := wantRS.ScopeSpans().At(0) reqsIL := reqsRS.ScopeSpans().At(0) diff --git a/receiver/zipkinreceiver/trace_receiver.go b/receiver/zipkinreceiver/trace_receiver.go index 589819fcedb8..a64fd87a23b7 100644 --- a/receiver/zipkinreceiver/trace_receiver.go +++ b/receiver/zipkinreceiver/trace_receiver.go @@ -30,8 +30,8 @@ import ( "go.opentelemetry.io/collector/component/componenterror" "go.opentelemetry.io/collector/config" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/obsreport" + "go.opentelemetry.io/collector/pdata/ptrace" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin/zipkinv1" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin/zipkinv2" @@ -57,11 +57,11 @@ type zipkinReceiver struct { server *http.Server config *Config - v1ThriftUnmarshaler 
pdata.TracesUnmarshaler - v1JSONUnmarshaler pdata.TracesUnmarshaler - jsonUnmarshaler pdata.TracesUnmarshaler - protobufUnmarshaler pdata.TracesUnmarshaler - protobufDebugUnmarshaler pdata.TracesUnmarshaler + v1ThriftUnmarshaler ptrace.Unmarshaler + v1JSONUnmarshaler ptrace.Unmarshaler + jsonUnmarshaler ptrace.Unmarshaler + protobufUnmarshaler ptrace.Unmarshaler + protobufDebugUnmarshaler ptrace.Unmarshaler settings component.ReceiverCreateSettings } @@ -119,7 +119,7 @@ func (zr *zipkinReceiver) Start(_ context.Context, host component.Host) error { } // v1ToTraceSpans parses Zipkin v1 JSON traces and converts them to OpenCensus Proto spans. -func (zr *zipkinReceiver) v1ToTraceSpans(blob []byte, hdr http.Header) (reqs pdata.Traces, err error) { +func (zr *zipkinReceiver) v1ToTraceSpans(blob []byte, hdr http.Header) (reqs ptrace.Traces, err error) { if hdr.Get("Content-Type") == "application/x-thrift" { return zr.v1ThriftUnmarshaler.UnmarshalTraces(blob) } @@ -127,7 +127,7 @@ func (zr *zipkinReceiver) v1ToTraceSpans(blob []byte, hdr http.Header) (reqs pda } // v2ToTraceSpans parses Zipkin v2 JSON or Protobuf traces and converts them to OpenCensus Proto spans. -func (zr *zipkinReceiver) v2ToTraceSpans(blob []byte, hdr http.Header) (reqs pdata.Traces, err error) { +func (zr *zipkinReceiver) v2ToTraceSpans(blob []byte, hdr http.Header) (reqs ptrace.Traces, err error) { // This flag's reference is from: // https://github.com/openzipkin/zipkin-go/blob/3793c981d4f621c0e3eb1457acffa2c1cc591384/proto/v2/zipkin.proto#L154 debugWasSet := hdr.Get("X-B3-Flags") == "1" @@ -221,7 +221,7 @@ func (zr *zipkinReceiver) ServeHTTP(w http.ResponseWriter, r *http.Request) { } _ = r.Body.Close() - var td pdata.Traces + var td ptrace.Traces var err error if asZipkinv1 { td, err = zr.v1ToTraceSpans(slurp, r.Header) diff --git a/receiver/zipkinreceiver/trace_receiver_test.go b/receiver/zipkinreceiver/trace_receiver_test.go index 37ea0b747504..e59dba94b71f 100644 --- a/receiver/zipkinreceiver/trace_receiver_test.go +++ b/receiver/zipkinreceiver/trace_receiver_test.go @@ -39,8 +39,8 @@ import ( "go.opentelemetry.io/collector/config/confighttp" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumertest" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/pcommon" ) const ( @@ -403,7 +403,7 @@ func TestReceiverConvertsStringsToTypes(t *testing.T) { td := next.AllTraces()[0] span := td.ResourceSpans().At(0).ScopeSpans().At(0).Spans().At(0) - expected := pdata.NewMapFromRaw(map[string]interface{}{ + expected := pcommon.NewMapFromRaw(map[string]interface{}{ "cache_hit": true, "ping_count": 25, "timeout": 12.3, diff --git a/receiver/zookeeperreceiver/go.mod b/receiver/zookeeperreceiver/go.mod index 40e5cfdc2fea..6c6f27220b00 100644 --- a/receiver/zookeeperreceiver/go.mod +++ b/receiver/zookeeperreceiver/go.mod @@ -7,8 +7,8 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/internal/containertest v0.48.0 github.com/open-telemetry/opentelemetry-collector-contrib/internal/scrapertest v0.48.0 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/zap v1.21.0 ) @@ -26,7 +26,7 @@ require ( github.com/fsnotify/fsnotify v1.5.1 
// indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.2 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect @@ -37,16 +37,13 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rogpeppe/go-internal v1.6.1 // indirect github.com/sirupsen/logrus v1.8.1 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect go.opentelemetry.io/otel/trace v1.6.3 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect - golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect - golang.org/x/text v0.3.7 // indirect + golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 // indirect google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect google.golang.org/grpc v1.45.0 // indirect google.golang.org/protobuf v1.28.0 // indirect @@ -61,3 +58,5 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/scrap // see https://github.com/distribution/distribution/issues/3590 exclude github.com/docker/distribution v2.8.0+incompatible + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/receiver/zookeeperreceiver/go.sum b/receiver/zookeeperreceiver/go.sum index 8be63629173f..756c14f35b19 100644 --- a/receiver/zookeeperreceiver/go.sum +++ b/receiver/zookeeperreceiver/go.sum @@ -451,8 +451,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -630,8 +630,6 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasO github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod 
h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= @@ -698,15 +696,15 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= @@ -804,8 +802,7 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -889,8 +886,8 @@ golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys 
v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -901,7 +898,6 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= diff --git a/receiver/zookeeperreceiver/internal/metadata/generated_metrics_v2.go b/receiver/zookeeperreceiver/internal/metadata/generated_metrics_v2.go index 2c0b8917a9be..0bf859755346 100644 --- a/receiver/zookeeperreceiver/internal/metadata/generated_metrics_v2.go +++ b/receiver/zookeeperreceiver/internal/metadata/generated_metrics_v2.go @@ -5,7 +5,8 @@ package metadata import ( "time" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" ) // MetricSettings provides common settings for a particular metric. @@ -83,7 +84,7 @@ func DefaultMetricsSettings() MetricsSettings { } type metricZookeeperConnectionActive struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -93,12 +94,12 @@ func (m *metricZookeeperConnectionActive) init() { m.data.SetName("zookeeper.connection.active") m.data.SetDescription("Number of active clients connected to a ZooKeeper server.") m.data.SetUnit("{connections}") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) } -func (m *metricZookeeperConnectionActive) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricZookeeperConnectionActive) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -116,7 +117,7 @@ func (m *metricZookeeperConnectionActive) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricZookeeperConnectionActive) emit(metrics pdata.MetricSlice) { +func (m *metricZookeeperConnectionActive) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -127,14 +128,14 @@ func (m *metricZookeeperConnectionActive) emit(metrics pdata.MetricSlice) { func newMetricZookeeperConnectionActive(settings MetricSettings) metricZookeeperConnectionActive { m := metricZookeeperConnectionActive{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricZookeeperDataTreeEphemeralNodeCount struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -144,12 +145,12 @@ func (m *metricZookeeperDataTreeEphemeralNodeCount) init() { m.data.SetName("zookeeper.data_tree.ephemeral_node.count") m.data.SetDescription("Number of ephemeral nodes that a ZooKeeper server has in its data tree.") m.data.SetUnit("{nodes}") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) } -func (m *metricZookeeperDataTreeEphemeralNodeCount) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricZookeeperDataTreeEphemeralNodeCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -167,7 +168,7 @@ func (m *metricZookeeperDataTreeEphemeralNodeCount) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricZookeeperDataTreeEphemeralNodeCount) emit(metrics pdata.MetricSlice) { +func (m *metricZookeeperDataTreeEphemeralNodeCount) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -178,14 +179,14 @@ func (m *metricZookeeperDataTreeEphemeralNodeCount) emit(metrics pdata.MetricSli func newMetricZookeeperDataTreeEphemeralNodeCount(settings MetricSettings) metricZookeeperDataTreeEphemeralNodeCount { m := metricZookeeperDataTreeEphemeralNodeCount{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricZookeeperDataTreeSize struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -195,12 +196,12 @@ func (m *metricZookeeperDataTreeSize) init() { m.data.SetName("zookeeper.data_tree.size") m.data.SetDescription("Size of data in bytes that a ZooKeeper server has in its data tree.") m.data.SetUnit("By") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) } -func (m *metricZookeeperDataTreeSize) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricZookeeperDataTreeSize) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -218,7 +219,7 @@ func (m *metricZookeeperDataTreeSize) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricZookeeperDataTreeSize) emit(metrics pdata.MetricSlice) { +func (m *metricZookeeperDataTreeSize) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -229,14 +230,14 @@ func (m *metricZookeeperDataTreeSize) emit(metrics pdata.MetricSlice) { func newMetricZookeeperDataTreeSize(settings MetricSettings) metricZookeeperDataTreeSize { m := metricZookeeperDataTreeSize{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricZookeeperFileDescriptorLimit struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -246,10 +247,10 @@ func (m *metricZookeeperFileDescriptorLimit) init() { m.data.SetName("zookeeper.file_descriptor.limit") m.data.SetDescription("Maximum number of file descriptors that a ZooKeeper server can open.") m.data.SetUnit("{file_descriptors}") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) } -func (m *metricZookeeperFileDescriptorLimit) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricZookeeperFileDescriptorLimit) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -267,7 +268,7 @@ func (m *metricZookeeperFileDescriptorLimit) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricZookeeperFileDescriptorLimit) emit(metrics pdata.MetricSlice) { +func (m *metricZookeeperFileDescriptorLimit) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -278,14 +279,14 @@ func (m *metricZookeeperFileDescriptorLimit) emit(metrics pdata.MetricSlice) { func newMetricZookeeperFileDescriptorLimit(settings MetricSettings) metricZookeeperFileDescriptorLimit { m := metricZookeeperFileDescriptorLimit{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricZookeeperFileDescriptorOpen struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -295,12 +296,12 @@ func (m *metricZookeeperFileDescriptorOpen) init() { m.data.SetName("zookeeper.file_descriptor.open") m.data.SetDescription("Number of file descriptors that a ZooKeeper server has open.") m.data.SetUnit("{file_descriptors}") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) } -func (m *metricZookeeperFileDescriptorOpen) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricZookeeperFileDescriptorOpen) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -318,7 +319,7 @@ func (m *metricZookeeperFileDescriptorOpen) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricZookeeperFileDescriptorOpen) emit(metrics pdata.MetricSlice) { +func (m *metricZookeeperFileDescriptorOpen) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -329,14 +330,14 @@ func (m *metricZookeeperFileDescriptorOpen) emit(metrics pdata.MetricSlice) { func newMetricZookeeperFileDescriptorOpen(settings MetricSettings) metricZookeeperFileDescriptorOpen { m := metricZookeeperFileDescriptorOpen{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricZookeeperFollowerCount struct { - data pdata.Metric // data buffer for generated metric. 
+ data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -346,13 +347,13 @@ func (m *metricZookeeperFollowerCount) init() { m.data.SetName("zookeeper.follower.count") m.data.SetDescription("The number of followers. Only exposed by the leader.") m.data.SetUnit("{followers}") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricZookeeperFollowerCount) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, stateAttributeValue string) { +func (m *metricZookeeperFollowerCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, stateAttributeValue string) { if !m.settings.Enabled { return } @@ -360,7 +361,7 @@ func (m *metricZookeeperFollowerCount) recordDataPoint(start pdata.Timestamp, ts dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.State, pdata.NewValueString(stateAttributeValue)) + dp.Attributes().Insert(A.State, pcommon.NewValueString(stateAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -371,7 +372,7 @@ func (m *metricZookeeperFollowerCount) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricZookeeperFollowerCount) emit(metrics pdata.MetricSlice) { +func (m *metricZookeeperFollowerCount) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -382,14 +383,14 @@ func (m *metricZookeeperFollowerCount) emit(metrics pdata.MetricSlice) { func newMetricZookeeperFollowerCount(settings MetricSettings) metricZookeeperFollowerCount { m := metricZookeeperFollowerCount{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricZookeeperFsyncExceededThresholdCount struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -399,12 +400,12 @@ func (m *metricZookeeperFsyncExceededThresholdCount) init() { m.data.SetName("zookeeper.fsync.exceeded_threshold.count") m.data.SetDescription("Number of times fsync duration has exceeded warning threshold.") m.data.SetUnit("{events}") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) } -func (m *metricZookeeperFsyncExceededThresholdCount) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricZookeeperFsyncExceededThresholdCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -422,7 +423,7 @@ func (m *metricZookeeperFsyncExceededThresholdCount) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricZookeeperFsyncExceededThresholdCount) emit(metrics pdata.MetricSlice) { +func (m *metricZookeeperFsyncExceededThresholdCount) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -433,14 +434,14 @@ func (m *metricZookeeperFsyncExceededThresholdCount) emit(metrics pdata.MetricSl func newMetricZookeeperFsyncExceededThresholdCount(settings MetricSettings) metricZookeeperFsyncExceededThresholdCount { m := metricZookeeperFsyncExceededThresholdCount{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricZookeeperLatencyAvg struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -450,10 +451,10 @@ func (m *metricZookeeperLatencyAvg) init() { m.data.SetName("zookeeper.latency.avg") m.data.SetDescription("Average time in milliseconds for requests to be processed.") m.data.SetUnit("ms") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) } -func (m *metricZookeeperLatencyAvg) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricZookeeperLatencyAvg) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -471,7 +472,7 @@ func (m *metricZookeeperLatencyAvg) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricZookeeperLatencyAvg) emit(metrics pdata.MetricSlice) { +func (m *metricZookeeperLatencyAvg) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -482,14 +483,14 @@ func (m *metricZookeeperLatencyAvg) emit(metrics pdata.MetricSlice) { func newMetricZookeeperLatencyAvg(settings MetricSettings) metricZookeeperLatencyAvg { m := metricZookeeperLatencyAvg{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricZookeeperLatencyMax struct { - data pdata.Metric // data buffer for generated metric. 
+ data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -499,10 +500,10 @@ func (m *metricZookeeperLatencyMax) init() { m.data.SetName("zookeeper.latency.max") m.data.SetDescription("Maximum time in milliseconds for requests to be processed.") m.data.SetUnit("ms") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) } -func (m *metricZookeeperLatencyMax) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricZookeeperLatencyMax) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -520,7 +521,7 @@ func (m *metricZookeeperLatencyMax) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricZookeeperLatencyMax) emit(metrics pdata.MetricSlice) { +func (m *metricZookeeperLatencyMax) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -531,14 +532,14 @@ func (m *metricZookeeperLatencyMax) emit(metrics pdata.MetricSlice) { func newMetricZookeeperLatencyMax(settings MetricSettings) metricZookeeperLatencyMax { m := metricZookeeperLatencyMax{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricZookeeperLatencyMin struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -548,10 +549,10 @@ func (m *metricZookeeperLatencyMin) init() { m.data.SetName("zookeeper.latency.min") m.data.SetDescription("Minimum time in milliseconds for requests to be processed.") m.data.SetUnit("ms") - m.data.SetDataType(pdata.MetricDataTypeGauge) + m.data.SetDataType(pmetric.MetricDataTypeGauge) } -func (m *metricZookeeperLatencyMin) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricZookeeperLatencyMin) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -569,7 +570,7 @@ func (m *metricZookeeperLatencyMin) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricZookeeperLatencyMin) emit(metrics pdata.MetricSlice) { +func (m *metricZookeeperLatencyMin) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Gauge().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -580,14 +581,14 @@ func (m *metricZookeeperLatencyMin) emit(metrics pdata.MetricSlice) { func newMetricZookeeperLatencyMin(settings MetricSettings) metricZookeeperLatencyMin { m := metricZookeeperLatencyMin{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricZookeeperPacketCount struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -597,13 +598,13 @@ func (m *metricZookeeperPacketCount) init() { m.data.SetName("zookeeper.packet.count") m.data.SetDescription("The number of ZooKeeper packets received or sent by a server.") m.data.SetUnit("{packets}") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(true) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) m.data.Sum().DataPoints().EnsureCapacity(m.capacity) } -func (m *metricZookeeperPacketCount) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64, directionAttributeValue string) { +func (m *metricZookeeperPacketCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64, directionAttributeValue string) { if !m.settings.Enabled { return } @@ -611,7 +612,7 @@ func (m *metricZookeeperPacketCount) recordDataPoint(start pdata.Timestamp, ts p dp.SetStartTimestamp(start) dp.SetTimestamp(ts) dp.SetIntVal(val) - dp.Attributes().Insert(A.Direction, pdata.NewValueString(directionAttributeValue)) + dp.Attributes().Insert(A.Direction, pcommon.NewValueString(directionAttributeValue)) } // updateCapacity saves max length of data point slices that will be used for the slice capacity. @@ -622,7 +623,7 @@ func (m *metricZookeeperPacketCount) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricZookeeperPacketCount) emit(metrics pdata.MetricSlice) { +func (m *metricZookeeperPacketCount) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -633,14 +634,14 @@ func (m *metricZookeeperPacketCount) emit(metrics pdata.MetricSlice) { func newMetricZookeeperPacketCount(settings MetricSettings) metricZookeeperPacketCount { m := metricZookeeperPacketCount{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricZookeeperRequestActive struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -650,12 +651,12 @@ func (m *metricZookeeperRequestActive) init() { m.data.SetName("zookeeper.request.active") m.data.SetDescription("Number of currently executing requests.") m.data.SetUnit("{requests}") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) } -func (m *metricZookeeperRequestActive) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricZookeeperRequestActive) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -673,7 +674,7 @@ func (m *metricZookeeperRequestActive) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
-func (m *metricZookeeperRequestActive) emit(metrics pdata.MetricSlice) { +func (m *metricZookeeperRequestActive) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -684,14 +685,14 @@ func (m *metricZookeeperRequestActive) emit(metrics pdata.MetricSlice) { func newMetricZookeeperRequestActive(settings MetricSettings) metricZookeeperRequestActive { m := metricZookeeperRequestActive{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricZookeeperSyncPending struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -701,12 +702,12 @@ func (m *metricZookeeperSyncPending) init() { m.data.SetName("zookeeper.sync.pending") m.data.SetDescription("The number of pending syncs from the followers. Only exposed by the leader.") m.data.SetUnit("{syncs}") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) } -func (m *metricZookeeperSyncPending) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricZookeeperSyncPending) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -724,7 +725,7 @@ func (m *metricZookeeperSyncPending) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricZookeeperSyncPending) emit(metrics pdata.MetricSlice) { +func (m *metricZookeeperSyncPending) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -735,14 +736,14 @@ func (m *metricZookeeperSyncPending) emit(metrics pdata.MetricSlice) { func newMetricZookeeperSyncPending(settings MetricSettings) metricZookeeperSyncPending { m := metricZookeeperSyncPending{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricZookeeperWatchCount struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. 
} @@ -752,12 +753,12 @@ func (m *metricZookeeperWatchCount) init() { m.data.SetName("zookeeper.watch.count") m.data.SetDescription("Number of watches placed on Z-Nodes on a ZooKeeper server.") m.data.SetUnit("{watches}") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) } -func (m *metricZookeeperWatchCount) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricZookeeperWatchCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -775,7 +776,7 @@ func (m *metricZookeeperWatchCount) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. -func (m *metricZookeeperWatchCount) emit(metrics pdata.MetricSlice) { +func (m *metricZookeeperWatchCount) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -786,14 +787,14 @@ func (m *metricZookeeperWatchCount) emit(metrics pdata.MetricSlice) { func newMetricZookeeperWatchCount(settings MetricSettings) metricZookeeperWatchCount { m := metricZookeeperWatchCount{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m } type metricZookeeperZnodeCount struct { - data pdata.Metric // data buffer for generated metric. + data pmetric.Metric // data buffer for generated metric. settings MetricSettings // metric settings provided by user. capacity int // max observed number of data points added to the metric. } @@ -803,12 +804,12 @@ func (m *metricZookeeperZnodeCount) init() { m.data.SetName("zookeeper.znode.count") m.data.SetDescription("Number of z-nodes that a ZooKeeper server has in its data tree.") m.data.SetUnit("{znodes}") - m.data.SetDataType(pdata.MetricDataTypeSum) + m.data.SetDataType(pmetric.MetricDataTypeSum) m.data.Sum().SetIsMonotonic(false) - m.data.Sum().SetAggregationTemporality(pdata.MetricAggregationTemporalityCumulative) + m.data.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative) } -func (m *metricZookeeperZnodeCount) recordDataPoint(start pdata.Timestamp, ts pdata.Timestamp, val int64) { +func (m *metricZookeeperZnodeCount) recordDataPoint(start pcommon.Timestamp, ts pcommon.Timestamp, val int64) { if !m.settings.Enabled { return } @@ -826,7 +827,7 @@ func (m *metricZookeeperZnodeCount) updateCapacity() { } // emit appends recorded metric data to a metrics slice and prepares it for recording another set of data points. 
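Aside from the diff itself: the hunks above only swap type prefixes, so the following self-contained sketch shows the same migrated pattern in one place, building a cumulative monotonic Sum with the split-out pmetric and pcommon packages. It is illustrative only and not part of this change; the metric name "zookeeper.example.count" and the value 42 are made up.

package main

import (
	"fmt"
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	// pmetric now owns the metric model that previously lived under model/pdata.
	m := pmetric.NewMetric()
	m.SetName("zookeeper.example.count") // hypothetical metric name
	m.SetUnit("{events}")
	m.SetDataType(pmetric.MetricDataTypeSum)
	m.Sum().SetIsMonotonic(true)
	m.Sum().SetAggregationTemporality(pmetric.MetricAggregationTemporalityCumulative)

	// pcommon owns the shared primitives (Timestamp, Value, Map, Resource).
	now := pcommon.NewTimestampFromTime(time.Now())
	dp := m.Sum().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(now)
	dp.SetTimestamp(now)
	dp.SetIntVal(42)

	fmt.Println(m.Name(), m.Sum().DataPoints().Len()) // zookeeper.example.count 1
}
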
-func (m *metricZookeeperZnodeCount) emit(metrics pdata.MetricSlice) { +func (m *metricZookeeperZnodeCount) emit(metrics pmetric.MetricSlice) { if m.settings.Enabled && m.data.Sum().DataPoints().Len() > 0 { m.updateCapacity() m.data.MoveTo(metrics.AppendEmpty()) @@ -837,7 +838,7 @@ func (m *metricZookeeperZnodeCount) emit(metrics pdata.MetricSlice) { func newMetricZookeeperZnodeCount(settings MetricSettings) metricZookeeperZnodeCount { m := metricZookeeperZnodeCount{settings: settings} if settings.Enabled { - m.data = pdata.NewMetric() + m.data = pmetric.NewMetric() m.init() } return m @@ -846,10 +847,10 @@ func newMetricZookeeperZnodeCount(settings MetricSettings) metricZookeeperZnodeC // MetricsBuilder provides an interface for scrapers to report metrics while taking care of all the transformations // required to produce metric representation defined in metadata and user settings. type MetricsBuilder struct { - startTime pdata.Timestamp // start time that will be applied to all recorded data points. - metricsCapacity int // maximum observed number of metrics per resource. - resourceCapacity int // maximum observed number of resource attributes. - metricsBuffer pdata.Metrics // accumulates metrics data before emitting. + startTime pcommon.Timestamp // start time that will be applied to all recorded data points. + metricsCapacity int // maximum observed number of metrics per resource. + resourceCapacity int // maximum observed number of resource attributes. + metricsBuffer pmetric.Metrics // accumulates metrics data before emitting. metricZookeeperConnectionActive metricZookeeperConnectionActive metricZookeeperDataTreeEphemeralNodeCount metricZookeeperDataTreeEphemeralNodeCount metricZookeeperDataTreeSize metricZookeeperDataTreeSize @@ -871,7 +872,7 @@ type MetricsBuilder struct { type metricBuilderOption func(*MetricsBuilder) // WithStartTime sets startTime on the metrics builder. -func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { +func WithStartTime(startTime pcommon.Timestamp) metricBuilderOption { return func(mb *MetricsBuilder) { mb.startTime = startTime } @@ -879,8 +880,8 @@ func WithStartTime(startTime pdata.Timestamp) metricBuilderOption { func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) *MetricsBuilder { mb := &MetricsBuilder{ - startTime: pdata.NewTimestampFromTime(time.Now()), - metricsBuffer: pdata.NewMetrics(), + startTime: pcommon.NewTimestampFromTime(time.Now()), + metricsBuffer: pmetric.NewMetrics(), metricZookeeperConnectionActive: newMetricZookeeperConnectionActive(settings.ZookeeperConnectionActive), metricZookeeperDataTreeEphemeralNodeCount: newMetricZookeeperDataTreeEphemeralNodeCount(settings.ZookeeperDataTreeEphemeralNodeCount), metricZookeeperDataTreeSize: newMetricZookeeperDataTreeSize(settings.ZookeeperDataTreeSize), @@ -904,7 +905,7 @@ func NewMetricsBuilder(settings MetricsSettings, options ...metricBuilderOption) } // updateCapacity updates max length of metrics and resource attributes that will be used for the slice capacity. -func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { +func (mb *MetricsBuilder) updateCapacity(rm pmetric.ResourceMetrics) { if mb.metricsCapacity < rm.ScopeMetrics().At(0).Metrics().Len() { mb.metricsCapacity = rm.ScopeMetrics().At(0).Metrics().Len() } @@ -914,18 +915,18 @@ func (mb *MetricsBuilder) updateCapacity(rm pdata.ResourceMetrics) { } // ResourceOption applies changes to provided resource. 
-type ResourceOption func(pdata.Resource) +type ResourceOption func(pcommon.Resource) // WithServerState sets provided value as "server.state" attribute for current resource. func WithServerState(val string) ResourceOption { - return func(r pdata.Resource) { + return func(r pcommon.Resource) { r.Attributes().UpsertString("server.state", val) } } // WithZkVersion sets provided value as "zk.version" attribute for current resource. func WithZkVersion(val string) ResourceOption { - return func(r pdata.Resource) { + return func(r pcommon.Resource) { r.Attributes().UpsertString("zk.version", val) } } @@ -935,7 +936,7 @@ func WithZkVersion(val string) ResourceOption { // needs to emit metrics from several resources. Otherwise calling this function is not required, // just `Emit` function can be called instead. Resource attributes should be provided as ResourceOption arguments. func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { - rm := pdata.NewResourceMetrics() + rm := pmetric.NewResourceMetrics() rm.Resource().Attributes().EnsureCapacity(mb.resourceCapacity) for _, op := range ro { op(rm.Resource()) @@ -967,92 +968,92 @@ func (mb *MetricsBuilder) EmitForResource(ro ...ResourceOption) { // Emit returns all the metrics accumulated by the metrics builder and updates the internal state to be ready for // recording another set of metrics. This function will be responsible for applying all the transformations required to // produce metric representation defined in metadata and user settings, e.g. delta or cumulative. -func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pdata.Metrics { +func (mb *MetricsBuilder) Emit(ro ...ResourceOption) pmetric.Metrics { mb.EmitForResource(ro...) - metrics := pdata.NewMetrics() + metrics := pmetric.NewMetrics() mb.metricsBuffer.MoveTo(metrics) return metrics } // RecordZookeeperConnectionActiveDataPoint adds a data point to zookeeper.connection.active metric. -func (mb *MetricsBuilder) RecordZookeeperConnectionActiveDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordZookeeperConnectionActiveDataPoint(ts pcommon.Timestamp, val int64) { mb.metricZookeeperConnectionActive.recordDataPoint(mb.startTime, ts, val) } // RecordZookeeperDataTreeEphemeralNodeCountDataPoint adds a data point to zookeeper.data_tree.ephemeral_node.count metric. -func (mb *MetricsBuilder) RecordZookeeperDataTreeEphemeralNodeCountDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordZookeeperDataTreeEphemeralNodeCountDataPoint(ts pcommon.Timestamp, val int64) { mb.metricZookeeperDataTreeEphemeralNodeCount.recordDataPoint(mb.startTime, ts, val) } // RecordZookeeperDataTreeSizeDataPoint adds a data point to zookeeper.data_tree.size metric. -func (mb *MetricsBuilder) RecordZookeeperDataTreeSizeDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordZookeeperDataTreeSizeDataPoint(ts pcommon.Timestamp, val int64) { mb.metricZookeeperDataTreeSize.recordDataPoint(mb.startTime, ts, val) } // RecordZookeeperFileDescriptorLimitDataPoint adds a data point to zookeeper.file_descriptor.limit metric. -func (mb *MetricsBuilder) RecordZookeeperFileDescriptorLimitDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordZookeeperFileDescriptorLimitDataPoint(ts pcommon.Timestamp, val int64) { mb.metricZookeeperFileDescriptorLimit.recordDataPoint(mb.startTime, ts, val) } // RecordZookeeperFileDescriptorOpenDataPoint adds a data point to zookeeper.file_descriptor.open metric. 
-func (mb *MetricsBuilder) RecordZookeeperFileDescriptorOpenDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordZookeeperFileDescriptorOpenDataPoint(ts pcommon.Timestamp, val int64) { mb.metricZookeeperFileDescriptorOpen.recordDataPoint(mb.startTime, ts, val) } // RecordZookeeperFollowerCountDataPoint adds a data point to zookeeper.follower.count metric. -func (mb *MetricsBuilder) RecordZookeeperFollowerCountDataPoint(ts pdata.Timestamp, val int64, stateAttributeValue string) { +func (mb *MetricsBuilder) RecordZookeeperFollowerCountDataPoint(ts pcommon.Timestamp, val int64, stateAttributeValue string) { mb.metricZookeeperFollowerCount.recordDataPoint(mb.startTime, ts, val, stateAttributeValue) } // RecordZookeeperFsyncExceededThresholdCountDataPoint adds a data point to zookeeper.fsync.exceeded_threshold.count metric. -func (mb *MetricsBuilder) RecordZookeeperFsyncExceededThresholdCountDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordZookeeperFsyncExceededThresholdCountDataPoint(ts pcommon.Timestamp, val int64) { mb.metricZookeeperFsyncExceededThresholdCount.recordDataPoint(mb.startTime, ts, val) } // RecordZookeeperLatencyAvgDataPoint adds a data point to zookeeper.latency.avg metric. -func (mb *MetricsBuilder) RecordZookeeperLatencyAvgDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordZookeeperLatencyAvgDataPoint(ts pcommon.Timestamp, val int64) { mb.metricZookeeperLatencyAvg.recordDataPoint(mb.startTime, ts, val) } // RecordZookeeperLatencyMaxDataPoint adds a data point to zookeeper.latency.max metric. -func (mb *MetricsBuilder) RecordZookeeperLatencyMaxDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordZookeeperLatencyMaxDataPoint(ts pcommon.Timestamp, val int64) { mb.metricZookeeperLatencyMax.recordDataPoint(mb.startTime, ts, val) } // RecordZookeeperLatencyMinDataPoint adds a data point to zookeeper.latency.min metric. -func (mb *MetricsBuilder) RecordZookeeperLatencyMinDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordZookeeperLatencyMinDataPoint(ts pcommon.Timestamp, val int64) { mb.metricZookeeperLatencyMin.recordDataPoint(mb.startTime, ts, val) } // RecordZookeeperPacketCountDataPoint adds a data point to zookeeper.packet.count metric. -func (mb *MetricsBuilder) RecordZookeeperPacketCountDataPoint(ts pdata.Timestamp, val int64, directionAttributeValue string) { +func (mb *MetricsBuilder) RecordZookeeperPacketCountDataPoint(ts pcommon.Timestamp, val int64, directionAttributeValue string) { mb.metricZookeeperPacketCount.recordDataPoint(mb.startTime, ts, val, directionAttributeValue) } // RecordZookeeperRequestActiveDataPoint adds a data point to zookeeper.request.active metric. -func (mb *MetricsBuilder) RecordZookeeperRequestActiveDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordZookeeperRequestActiveDataPoint(ts pcommon.Timestamp, val int64) { mb.metricZookeeperRequestActive.recordDataPoint(mb.startTime, ts, val) } // RecordZookeeperSyncPendingDataPoint adds a data point to zookeeper.sync.pending metric. -func (mb *MetricsBuilder) RecordZookeeperSyncPendingDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordZookeeperSyncPendingDataPoint(ts pcommon.Timestamp, val int64) { mb.metricZookeeperSyncPending.recordDataPoint(mb.startTime, ts, val) } // RecordZookeeperWatchCountDataPoint adds a data point to zookeeper.watch.count metric. 
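For orientation, a hedged sketch of how the generated MetricsBuilder is driven after this change, roughly mirroring what the zookeeper scraper does: timestamps are pcommon.Timestamp and Emit returns pmetric.Metrics. The enabled metric, the recorded value 1024, and the attribute values "follower" and "3.6.2" are invented for the example; the MetricsSettings/MetricSettings shapes are taken from the generated code above, and the metadata package is internal, so this only builds inside the contrib module.

package main

import (
	"fmt"
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"

	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zookeeperreceiver/internal/metadata"
)

func main() {
	// Enable a single metric; the remaining MetricsSettings fields stay disabled.
	settings := metadata.MetricsSettings{
		ZookeeperDataTreeSize: metadata.MetricSettings{Enabled: true},
	}

	mb := metadata.NewMetricsBuilder(settings,
		metadata.WithStartTime(pcommon.NewTimestampFromTime(time.Now())))

	// Record data points with pcommon.Timestamp instead of pdata.Timestamp.
	now := pcommon.NewTimestampFromTime(time.Now())
	mb.RecordZookeeperDataTreeSizeDataPoint(now, 1024)

	// Emit applies resource attributes via ResourceOption and returns pmetric.Metrics.
	metrics := mb.Emit(metadata.WithServerState("follower"), metadata.WithZkVersion("3.6.2"))
	fmt.Println(metrics.MetricCount()) // 1
}
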
-func (mb *MetricsBuilder) RecordZookeeperWatchCountDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordZookeeperWatchCountDataPoint(ts pcommon.Timestamp, val int64) { mb.metricZookeeperWatchCount.recordDataPoint(mb.startTime, ts, val) } // RecordZookeeperZnodeCountDataPoint adds a data point to zookeeper.znode.count metric. -func (mb *MetricsBuilder) RecordZookeeperZnodeCountDataPoint(ts pdata.Timestamp, val int64) { +func (mb *MetricsBuilder) RecordZookeeperZnodeCountDataPoint(ts pcommon.Timestamp, val int64) { mb.metricZookeeperZnodeCount.recordDataPoint(mb.startTime, ts, val) } // Reset resets metrics builder to its initial state. It should be used when external metrics source is restarted, // and metrics builder should update its startTime and reset it's internal state accordingly. func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) { - mb.startTime = pdata.NewTimestampFromTime(time.Now()) + mb.startTime = pcommon.NewTimestampFromTime(time.Now()) for _, op := range options { op(mb) } diff --git a/receiver/zookeeperreceiver/metrics.go b/receiver/zookeeperreceiver/metrics.go index 5bf5356dd4c4..e946291b2a00 100644 --- a/receiver/zookeeperreceiver/metrics.go +++ b/receiver/zookeeperreceiver/metrics.go @@ -17,7 +17,7 @@ package zookeeperreceiver // import "github.com/open-telemetry/opentelemetry-col import ( "fmt" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zookeeperreceiver/internal/metadata" @@ -62,14 +62,14 @@ func newMetricCreator(mb *metadata.MetricsBuilder) *metricCreator { } } -func (m *metricCreator) recordDataPointsFunc(metric string) func(ts pdata.Timestamp, val int64) { +func (m *metricCreator) recordDataPointsFunc(metric string) func(ts pcommon.Timestamp, val int64) { switch metric { case followersMetricKey: - return func(ts pdata.Timestamp, val int64) { + return func(ts pcommon.Timestamp, val int64) { m.computedMetricStore[followersMetricKey] = val } case syncedFollowersMetricKey: - return func(ts pdata.Timestamp, val int64) { + return func(ts pcommon.Timestamp, val int64) { m.computedMetricStore[syncedFollowersMetricKey] = val m.mb.RecordZookeeperFollowerCountDataPoint(ts, val, metadata.AttributeState.Synced) } @@ -100,11 +100,11 @@ func (m *metricCreator) recordDataPointsFunc(metric string) func(ts pdata.Timest case fSyncThresholdExceedCountMetricKey: return m.mb.RecordZookeeperFsyncExceededThresholdCountDataPoint case packetsReceivedMetricKey: - return func(ts pdata.Timestamp, val int64) { + return func(ts pcommon.Timestamp, val int64) { m.mb.RecordZookeeperPacketCountDataPoint(ts, val, metadata.AttributeDirection.Received) } case packetsSentMetricKey: - return func(ts pdata.Timestamp, val int64) { + return func(ts pcommon.Timestamp, val int64) { m.mb.RecordZookeeperPacketCountDataPoint(ts, val, metadata.AttributeDirection.Sent) } } @@ -112,7 +112,7 @@ func (m *metricCreator) recordDataPointsFunc(metric string) func(ts pdata.Timest return nil } -func (m *metricCreator) generateComputedMetrics(logger *zap.Logger, ts pdata.Timestamp) { +func (m *metricCreator) generateComputedMetrics(logger *zap.Logger, ts pcommon.Timestamp) { // not_synced Followers Count if err := m.computeNotSyncedFollowersMetric(ts); err != nil { logger.Debug("metric computation failed", zap.Error(err)) @@ -120,7 +120,7 @@ func (m *metricCreator) generateComputedMetrics(logger *zap.Logger, ts pdata.Tim } -func (m 
*metricCreator) computeNotSyncedFollowersMetric(ts pdata.Timestamp) error { +func (m *metricCreator) computeNotSyncedFollowersMetric(ts pcommon.Timestamp) error { followersTotal, ok := m.computedMetricStore[followersMetricKey] if !ok { return fmt.Errorf("could not compute not_synced follower.count, missing %s", followersMetricKey) diff --git a/receiver/zookeeperreceiver/scraper.go b/receiver/zookeeperreceiver/scraper.go index 2a05bc81e1a0..4c40462637b7 100644 --- a/receiver/zookeeperreceiver/scraper.go +++ b/receiver/zookeeperreceiver/scraper.go @@ -24,7 +24,8 @@ import ( "strconv" "time" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zookeeperreceiver/internal/metadata" @@ -80,7 +81,7 @@ func (z *zookeeperMetricsScraper) shutdown(_ context.Context) error { return nil } -func (z *zookeeperMetricsScraper) scrape(ctx context.Context) (pdata.Metrics, error) { +func (z *zookeeperMetricsScraper) scrape(ctx context.Context) (pmetric.Metrics, error) { var ctxWithTimeout context.Context ctxWithTimeout, z.cancel = context.WithTimeout(ctx, z.config.Timeout) @@ -90,7 +91,7 @@ func (z *zookeeperMetricsScraper) scrape(ctx context.Context) (pdata.Metrics, er zap.String("endpoint", z.config.Endpoint), zap.Error(err), ) - return pdata.NewMetrics(), err + return pmetric.NewMetrics(), err } defer func() { if closeErr := z.closeConnection(conn); closeErr != nil { @@ -108,18 +109,18 @@ func (z *zookeeperMetricsScraper) scrape(ctx context.Context) (pdata.Metrics, er return z.getResourceMetrics(conn) } -func (z *zookeeperMetricsScraper) getResourceMetrics(conn net.Conn) (pdata.Metrics, error) { +func (z *zookeeperMetricsScraper) getResourceMetrics(conn net.Conn) (pmetric.Metrics, error) { scanner, err := z.sendCmd(conn, mntrCommand) if err != nil { z.logger.Error("failed to send command", zap.Error(err), zap.String("command", mntrCommand), ) - return pdata.NewMetrics(), err + return pmetric.NewMetrics(), err } creator := newMetricCreator(z.mb) - now := pdata.NewTimestampFromTime(time.Now()) + now := pcommon.NewTimestampFromTime(time.Now()) resourceOpts := make([]metadata.ResourceOption, 0, 2) for scanner.Scan() { line := scanner.Text() diff --git a/receiver/zookeeperreceiver/scraper_test.go b/receiver/zookeeperreceiver/scraper_test.go index dfd7e220ed04..424f5598637f 100644 --- a/receiver/zookeeperreceiver/scraper_test.go +++ b/receiver/zookeeperreceiver/scraper_test.go @@ -26,7 +26,7 @@ import ( "time" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/zap" "go.uber.org/zap/zapcore" "go.uber.org/zap/zaptest/observer" @@ -251,7 +251,7 @@ func TestZookeeperMetricsScraperScrape(t *testing.T) { if tt.expectedNumResourceMetrics == 0 { if tt.wantErr { require.Error(t, err) - require.Equal(t, pdata.NewMetrics(), actualMetrics) + require.Equal(t, pmetric.NewMetrics(), actualMetrics) } require.NoError(t, z.shutdown(ctx)) diff --git a/testbed/correctnesstests/metrics/metric_diff.go b/testbed/correctnesstests/metrics/metric_diff.go index e69007364065..5897fb2c257c 100644 --- a/testbed/correctnesstests/metrics/metric_diff.go +++ b/testbed/correctnesstests/metrics/metric_diff.go @@ -18,7 +18,8 @@ import ( "fmt" "reflect" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + 
"go.opentelemetry.io/collector/pdata/pmetric" ) // MetricDiff is intended to support producing human-readable diffs between two MetricData structs during @@ -34,7 +35,7 @@ func (mf MetricDiff) String() string { return fmt.Sprintf("{msg='%v' expected=[%v] actual=[%v]}\n", mf.Msg, mf.ExpectedValue, mf.ActualValue) } -func diffRMSlices(sent []pdata.ResourceMetrics, recd []pdata.ResourceMetrics) []*MetricDiff { +func diffRMSlices(sent []pmetric.ResourceMetrics, recd []pmetric.ResourceMetrics) []*MetricDiff { var diffs []*MetricDiff if len(sent) != len(recd) { return []*MetricDiff{{ @@ -51,7 +52,7 @@ func diffRMSlices(sent []pdata.ResourceMetrics, recd []pdata.ResourceMetrics) [] return diffs } -func diffRMs(diffs []*MetricDiff, expected pdata.ResourceMetrics, actual pdata.ResourceMetrics) []*MetricDiff { +func diffRMs(diffs []*MetricDiff, expected pmetric.ResourceMetrics, actual pmetric.ResourceMetrics) []*MetricDiff { diffs = diffResource(diffs, expected.Resource(), actual.Resource()) diffs = diffILMSlice( diffs, @@ -63,8 +64,8 @@ func diffRMs(diffs []*MetricDiff, expected pdata.ResourceMetrics, actual pdata.R func diffILMSlice( diffs []*MetricDiff, - expected pdata.ScopeMetricsSlice, - actual pdata.ScopeMetricsSlice, + expected pmetric.ScopeMetricsSlice, + actual pmetric.ScopeMetricsSlice, ) []*MetricDiff { var mismatch bool diffs, mismatch = diffValues(diffs, actual.Len(), expected.Len(), "ScopeMetricsSlice len") @@ -79,13 +80,13 @@ func diffILMSlice( func diffILM( diffs []*MetricDiff, - expected pdata.ScopeMetrics, - actual pdata.ScopeMetrics, + expected pmetric.ScopeMetrics, + actual pmetric.ScopeMetrics, ) []*MetricDiff { return diffMetrics(diffs, expected.Metrics(), actual.Metrics()) } -func diffMetrics(diffs []*MetricDiff, expected pdata.MetricSlice, actual pdata.MetricSlice) []*MetricDiff { +func diffMetrics(diffs []*MetricDiff, expected pmetric.MetricSlice, actual pmetric.MetricSlice) []*MetricDiff { var mismatch bool diffs, mismatch = diffValues(diffs, actual.Len(), expected.Len(), "MetricSlice len") if mismatch { @@ -97,20 +98,20 @@ func diffMetrics(diffs []*MetricDiff, expected pdata.MetricSlice, actual pdata.M return diffs } -func DiffMetric(diffs []*MetricDiff, expected pdata.Metric, actual pdata.Metric) []*MetricDiff { +func DiffMetric(diffs []*MetricDiff, expected pmetric.Metric, actual pmetric.Metric) []*MetricDiff { var mismatch bool diffs, mismatch = diffMetricDescriptor(diffs, expected, actual) if mismatch { return diffs } switch actual.DataType() { - case pdata.MetricDataTypeGauge: + case pmetric.MetricDataTypeGauge: diffs = diffNumberPts(diffs, expected.Gauge().DataPoints(), actual.Gauge().DataPoints()) - case pdata.MetricDataTypeSum: + case pmetric.MetricDataTypeSum: diffs = diff(diffs, expected.Sum().IsMonotonic(), actual.Sum().IsMonotonic(), "Sum IsMonotonic") diffs = diff(diffs, expected.Sum().AggregationTemporality(), actual.Sum().AggregationTemporality(), "Sum AggregationTemporality") diffs = diffNumberPts(diffs, expected.Sum().DataPoints(), actual.Sum().DataPoints()) - case pdata.MetricDataTypeHistogram: + case pmetric.MetricDataTypeHistogram: diffs = diff(diffs, expected.Histogram().AggregationTemporality(), actual.Histogram().AggregationTemporality(), "Histogram AggregationTemporality") diffs = diffHistogramPts(diffs, expected.Histogram().DataPoints(), actual.Histogram().DataPoints()) } @@ -119,8 +120,8 @@ func DiffMetric(diffs []*MetricDiff, expected pdata.Metric, actual pdata.Metric) func diffMetricDescriptor( diffs []*MetricDiff, - expected pdata.Metric, - 
actual pdata.Metric, + expected pmetric.Metric, + actual pmetric.Metric, ) ([]*MetricDiff, bool) { diffs = diff(diffs, expected.Name(), actual.Name(), "Metric Name") diffs = diff(diffs, expected.Description(), actual.Description(), "Metric Description") @@ -130,8 +131,8 @@ func diffMetricDescriptor( func diffNumberPts( diffs []*MetricDiff, - expected pdata.NumberDataPointSlice, - actual pdata.NumberDataPointSlice, + expected pmetric.NumberDataPointSlice, + actual pmetric.NumberDataPointSlice, ) []*MetricDiff { var mismatch bool diffs, mismatch = diffValues(diffs, expected.Len(), actual.Len(), "NumberDataPointSlice len") @@ -144,9 +145,9 @@ func diffNumberPts( return diffs } switch expected.At(i).ValueType() { - case pdata.MetricValueTypeInt: + case pmetric.MetricValueTypeInt: diffs = diff(diffs, expected.At(i).IntVal(), actual.At(i).IntVal(), "NumberDataPoint Value") - case pdata.MetricValueTypeDouble: + case pmetric.MetricValueTypeDouble: diffs = diff(diffs, expected.At(i).DoubleVal(), actual.At(i).DoubleVal(), "NumberDataPoint Value") } diffExemplars(diffs, expected.At(i).Exemplars(), actual.At(i).Exemplars()) @@ -156,8 +157,8 @@ func diffNumberPts( func diffHistogramPts( diffs []*MetricDiff, - expected pdata.HistogramDataPointSlice, - actual pdata.HistogramDataPointSlice, + expected pmetric.HistogramDataPointSlice, + actual pmetric.HistogramDataPointSlice, ) []*MetricDiff { var mismatch bool diffs, mismatch = diffValues(diffs, expected.Len(), actual.Len(), "HistogramDataPointSlice len") @@ -172,8 +173,8 @@ func diffHistogramPts( func diffDoubleHistogramPt( diffs []*MetricDiff, - expected pdata.HistogramDataPoint, - actual pdata.HistogramDataPoint, + expected pmetric.HistogramDataPoint, + actual pmetric.HistogramDataPoint, ) []*MetricDiff { diffs = diff(diffs, expected.Count(), actual.Count(), "HistogramDataPoint Count") diffs = diff(diffs, expected.Sum(), actual.Sum(), "HistogramDataPoint Sum") @@ -185,8 +186,8 @@ func diffDoubleHistogramPt( func diffExemplars( diffs []*MetricDiff, - expected pdata.ExemplarSlice, - actual pdata.ExemplarSlice, + expected pmetric.ExemplarSlice, + actual pmetric.ExemplarSlice, ) []*MetricDiff { var mismatch bool diffs, mismatch = diffValues(diffs, expected.Len(), actual.Len(), "ExemplarSlice len") @@ -196,20 +197,20 @@ func diffExemplars( for i := 0; i < expected.Len(); i++ { diffs = diff(diffs, expected.At(i).ValueType(), actual.At(i).ValueType(), "Exemplar Value Type") switch expected.At(i).ValueType() { - case pdata.MetricValueTypeInt: + case pmetric.MetricValueTypeInt: diffs = diff(diffs, expected.At(i).IntVal(), actual.At(i).IntVal(), "Exemplar Value") - case pdata.MetricValueTypeDouble: + case pmetric.MetricValueTypeDouble: diffs = diff(diffs, expected.At(i).DoubleVal(), actual.At(i).DoubleVal(), "Exemplar Value") } } return diffs } -func diffResource(diffs []*MetricDiff, expected pdata.Resource, actual pdata.Resource) []*MetricDiff { +func diffResource(diffs []*MetricDiff, expected pcommon.Resource, actual pcommon.Resource) []*MetricDiff { return diffAttrs(diffs, expected.Attributes(), actual.Attributes()) } -func diffAttrs(diffs []*MetricDiff, expected pdata.Map, actual pdata.Map) []*MetricDiff { +func diffAttrs(diffs []*MetricDiff, expected pcommon.Map, actual pcommon.Map) []*MetricDiff { if !reflect.DeepEqual(expected, actual) { diffs = append(diffs, &MetricDiff{ ExpectedValue: attrMapToString(expected), @@ -241,9 +242,9 @@ func diffValues( return diffs, false } -func attrMapToString(m pdata.Map) string { +func attrMapToString(m pcommon.Map) 
string { out := "" - m.Range(func(k string, v pdata.Value) bool { + m.Range(func(k string, v pcommon.Value) bool { out += "[" + k + "=" + v.StringVal() + "]" return true }) diff --git a/testbed/correctnesstests/metrics/metric_diff_test.go b/testbed/correctnesstests/metrics/metric_diff_test.go index bde97b0dde2d..de629ad3b2f1 100644 --- a/testbed/correctnesstests/metrics/metric_diff_test.go +++ b/testbed/correctnesstests/metrics/metric_diff_test.go @@ -18,7 +18,7 @@ import ( "testing" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/goldendataset" ) @@ -30,13 +30,13 @@ func TestSameMetrics(t *testing.T) { assert.Nil(t, diffs) } -func diffMetricData(expected pdata.Metrics, actual pdata.Metrics) []*MetricDiff { +func diffMetricData(expected pmetric.Metrics, actual pmetric.Metrics) []*MetricDiff { expectedRMSlice := expected.ResourceMetrics() actualRMSlice := actual.ResourceMetrics() return diffRMSlices(toSlice(expectedRMSlice), toSlice(actualRMSlice)) } -func toSlice(s pdata.ResourceMetricsSlice) (out []pdata.ResourceMetrics) { +func toSlice(s pmetric.ResourceMetricsSlice) (out []pmetric.ResourceMetrics) { for i := 0; i < s.Len(); i++ { out = append(out, s.At(i)) } @@ -64,7 +64,7 @@ func TestDifferentNumPts(t *testing.T) { func TestDifferentPtValueTypes(t *testing.T) { expected := goldendataset.MetricsFromCfg(goldendataset.DefaultCfg()) cfg := goldendataset.DefaultCfg() - cfg.MetricValueType = pdata.MetricValueTypeDouble + cfg.MetricValueType = pmetric.MetricValueTypeDouble actual := goldendataset.MetricsFromCfg(cfg) diffs := diffMetricData(expected, actual) assert.Len(t, diffs, 1) @@ -72,10 +72,10 @@ func TestDifferentPtValueTypes(t *testing.T) { func TestHistogram(t *testing.T) { cfg1 := goldendataset.DefaultCfg() - cfg1.MetricDescriptorType = pdata.MetricDataTypeHistogram + cfg1.MetricDescriptorType = pmetric.MetricDataTypeHistogram expected := goldendataset.MetricsFromCfg(cfg1) cfg2 := goldendataset.DefaultCfg() - cfg2.MetricDescriptorType = pdata.MetricDataTypeHistogram + cfg2.MetricDescriptorType = pmetric.MetricDataTypeHistogram cfg2.PtVal = 2 actual := goldendataset.MetricsFromCfg(cfg2) diffs := diffMetricData(expected, actual) diff --git a/testbed/correctnesstests/metrics/metric_index.go b/testbed/correctnesstests/metrics/metric_index.go index c6a30905cc5a..fd2514980a7b 100644 --- a/testbed/correctnesstests/metrics/metric_index.go +++ b/testbed/correctnesstests/metrics/metric_index.go @@ -15,11 +15,11 @@ package metrics // import "github.com/open-telemetry/opentelemetry-collector-contrib/testbed/correctnesstests/metrics" import ( - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" ) type metricReceived struct { - pdm pdata.Metrics + pdm pmetric.Metrics received bool } @@ -27,7 +27,7 @@ type metricsReceivedIndex struct { m map[string]*metricReceived } -func newMetricsReceivedIndex(pdms []pdata.Metrics) *metricsReceivedIndex { +func newMetricsReceivedIndex(pdms []pmetric.Metrics) *metricsReceivedIndex { mi := &metricsReceivedIndex{m: map[string]*metricReceived{}} for _, pdm := range pdms { metrics := pdm.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics() diff --git a/testbed/correctnesstests/metrics/metric_supplier.go b/testbed/correctnesstests/metrics/metric_supplier.go index ea0d43df62b2..6316f3d274ba 100644 --- 
a/testbed/correctnesstests/metrics/metric_supplier.go +++ b/testbed/correctnesstests/metrics/metric_supplier.go @@ -15,21 +15,21 @@ package metrics // import "github.com/open-telemetry/opentelemetry-collector-contrib/testbed/correctnesstests/metrics" import ( - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" ) type metricSupplier struct { - pdms []pdata.Metrics + pdms []pmetric.Metrics currIdx int } -func newMetricSupplier(pdms []pdata.Metrics) *metricSupplier { +func newMetricSupplier(pdms []pmetric.Metrics) *metricSupplier { return &metricSupplier{pdms: pdms} } -func (p *metricSupplier) nextMetrics() (pdm pdata.Metrics, done bool) { +func (p *metricSupplier) nextMetrics() (pdm pmetric.Metrics, done bool) { if p.currIdx == len(p.pdms) { - return pdata.Metrics{}, true + return pmetric.Metrics{}, true } pdm = p.pdms[p.currIdx] p.currIdx++ diff --git a/testbed/correctnesstests/metrics/metrics_correctness_test.go b/testbed/correctnesstests/metrics/metrics_correctness_test.go index 52d76b18cbd2..f945042e2348 100644 --- a/testbed/correctnesstests/metrics/metrics_correctness_test.go +++ b/testbed/correctnesstests/metrics/metrics_correctness_test.go @@ -20,7 +20,7 @@ import ( "testing" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/goldendataset" "github.com/open-telemetry/opentelemetry-collector-contrib/testbed/correctnesstests" @@ -90,7 +90,7 @@ func testWithMetricsGoldenDataset( return r } -func getTestMetrics(t *testing.T) []pdata.Metrics { +func getTestMetrics(t *testing.T) []pmetric.Metrics { const file = "../../../internal/coreinternal/goldendataset/testdata/generated_pict_pairs_metrics.txt" mds, err := goldendataset.GenerateMetrics(file) require.NoError(t, err) diff --git a/testbed/correctnesstests/metrics/metrics_test_harness.go b/testbed/correctnesstests/metrics/metrics_test_harness.go index 14a2fbd5d017..f70019d062a4 100644 --- a/testbed/correctnesstests/metrics/metrics_test_harness.go +++ b/testbed/correctnesstests/metrics/metrics_test_harness.go @@ -20,7 +20,7 @@ import ( "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "github.com/open-telemetry/opentelemetry-collector-contrib/testbed/testbed" ) @@ -33,7 +33,7 @@ type testHarness struct { metricSupplier *metricSupplier metricIndex *metricsReceivedIndex sender testbed.MetricDataSender - currPDM pdata.Metrics + currPDM pmetric.Metrics diffConsumer diffConsumer outOfMetrics bool allMetricsReceived chan struct{} @@ -64,7 +64,7 @@ func (h *testHarness) Capabilities() consumer.Capabilities { return consumer.Capabilities{MutatesData: false} } -func (h *testHarness) ConsumeMetrics(_ context.Context, pdm pdata.Metrics) error { +func (h *testHarness) ConsumeMetrics(_ context.Context, pdm pmetric.Metrics) error { h.compare(pdm) if h.metricIndex.allReceived() { close(h.allMetricsReceived) @@ -75,7 +75,7 @@ func (h *testHarness) ConsumeMetrics(_ context.Context, pdm pdata.Metrics) error return nil } -func (h *testHarness) compare(pdm pdata.Metrics) { +func (h *testHarness) compare(pdm pmetric.Metrics) { pdms := pdm.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics() var diffs []*MetricDiff for i := 0; i < pdms.Len(); i++ { diff --git a/testbed/datasenders/fluent.go 
b/testbed/datasenders/fluent.go index eb65cae609ed..2c5ffe669522 100644 --- a/testbed/datasenders/fluent.go +++ b/testbed/datasenders/fluent.go @@ -26,7 +26,8 @@ import ( "github.com/fluent/fluent-logger-golang/fluent" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" "github.com/open-telemetry/opentelemetry-collector-contrib/testbed/testbed" ) @@ -86,7 +87,7 @@ func (f *FluentLogsForwarder) Stop() error { return f.fluentLogger.Close() } -func (f *FluentLogsForwarder) ConsumeLogs(_ context.Context, logs pdata.Logs) error { +func (f *FluentLogsForwarder) ConsumeLogs(_ context.Context, logs plog.Logs) error { for i := 0; i < logs.ResourceLogs().Len(); i++ { for j := 0; j < logs.ResourceLogs().At(i).ScopeLogs().Len(); j++ { ills := logs.ResourceLogs().At(i).ScopeLogs().At(j) @@ -106,22 +107,22 @@ func (f *FluentLogsForwarder) ConsumeLogs(_ context.Context, logs pdata.Logs) er return nil } -func (f *FluentLogsForwarder) convertLogToMap(lr pdata.LogRecord) map[string]string { +func (f *FluentLogsForwarder) convertLogToMap(lr plog.LogRecord) map[string]string { out := map[string]string{} - if lr.Body().Type() == pdata.ValueTypeString { + if lr.Body().Type() == pcommon.ValueTypeString { out["log"] = lr.Body().StringVal() } - lr.Attributes().Range(func(k string, v pdata.Value) bool { + lr.Attributes().Range(func(k string, v pcommon.Value) bool { switch v.Type() { - case pdata.ValueTypeString: + case pcommon.ValueTypeString: out[k] = v.StringVal() - case pdata.ValueTypeInt: + case pcommon.ValueTypeInt: out[k] = strconv.FormatInt(v.IntVal(), 10) - case pdata.ValueTypeDouble: + case pcommon.ValueTypeDouble: out[k] = strconv.FormatFloat(v.DoubleVal(), 'f', -1, 64) - case pdata.ValueTypeBool: + case pcommon.ValueTypeBool: out[k] = strconv.FormatBool(v.BoolVal()) default: panic("missing case") @@ -132,21 +133,21 @@ func (f *FluentLogsForwarder) convertLogToMap(lr pdata.LogRecord) map[string]str return out } -func (f *FluentLogsForwarder) convertLogToJSON(lr pdata.LogRecord) []byte { +func (f *FluentLogsForwarder) convertLogToJSON(lr plog.LogRecord) []byte { rec := map[string]string{ "time": time.Unix(0, int64(lr.Timestamp())).Format("02/01/2006:15:04:05Z"), } rec["log"] = lr.Body().StringVal() - lr.Attributes().Range(func(k string, v pdata.Value) bool { + lr.Attributes().Range(func(k string, v pcommon.Value) bool { switch v.Type() { - case pdata.ValueTypeString: + case pcommon.ValueTypeString: rec[k] = v.StringVal() - case pdata.ValueTypeInt: + case pcommon.ValueTypeInt: rec[k] = strconv.FormatInt(v.IntVal(), 10) - case pdata.ValueTypeDouble: + case pcommon.ValueTypeDouble: rec[k] = strconv.FormatFloat(v.DoubleVal(), 'f', -1, 64) - case pdata.ValueTypeBool: + case pcommon.ValueTypeBool: rec[k] = strconv.FormatBool(v.BoolVal()) default: panic("missing case") diff --git a/testbed/datasenders/fluentbit.go b/testbed/datasenders/fluentbit.go index 2461c9e0bb7b..fab18487b320 100644 --- a/testbed/datasenders/fluentbit.go +++ b/testbed/datasenders/fluentbit.go @@ -25,7 +25,8 @@ import ( "time" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" "github.com/open-telemetry/opentelemetry-collector-contrib/testbed/testbed" ) @@ -91,7 +92,7 @@ func (f *FluentBitFileLogWriter) setupParsers() { 
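The datasender hunks around here all repeat the same pcommon.Value type switch when flattening log attributes to strings. A compact, self-contained sketch of that switch follows; valueToString and the sample values are introduced here purely for illustration and are not part of the PR.

package main

import (
	"fmt"
	"strconv"

	"go.opentelemetry.io/collector/pdata/pcommon"
)

// valueToString mirrors the switch the testbed datasenders use to flatten
// pcommon.Value attributes into plain strings.
func valueToString(v pcommon.Value) string {
	switch v.Type() {
	case pcommon.ValueTypeString:
		return v.StringVal()
	case pcommon.ValueTypeInt:
		return strconv.FormatInt(v.IntVal(), 10)
	case pcommon.ValueTypeDouble:
		return strconv.FormatFloat(v.DoubleVal(), 'f', -1, 64)
	case pcommon.ValueTypeBool:
		return strconv.FormatBool(v.BoolVal())
	default:
		return "" // the testbed senders panic on unhandled types instead
	}
}

func main() {
	fmt.Println(valueToString(pcommon.NewValueString("ok"))) // ok
	fmt.Println(valueToString(pcommon.NewValueInt(7)))       // 7
}
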
f.parsersFile.Close() } -func (f *FluentBitFileLogWriter) ConsumeLogs(_ context.Context, logs pdata.Logs) error { +func (f *FluentBitFileLogWriter) ConsumeLogs(_ context.Context, logs plog.Logs) error { for i := 0; i < logs.ResourceLogs().Len(); i++ { for j := 0; j < logs.ResourceLogs().At(i).ScopeLogs().Len(); j++ { ills := logs.ResourceLogs().At(i).ScopeLogs().At(j) @@ -106,21 +107,21 @@ func (f *FluentBitFileLogWriter) ConsumeLogs(_ context.Context, logs pdata.Logs) return nil } -func (f *FluentBitFileLogWriter) convertLogToJSON(lr pdata.LogRecord) []byte { +func (f *FluentBitFileLogWriter) convertLogToJSON(lr plog.LogRecord) []byte { rec := map[string]string{ "time": time.Unix(0, int64(lr.Timestamp())).Format("02/01/2006:15:04:05Z"), } rec["log"] = lr.Body().StringVal() - lr.Attributes().Range(func(k string, v pdata.Value) bool { + lr.Attributes().Range(func(k string, v pcommon.Value) bool { switch v.Type() { - case pdata.ValueTypeString: + case pcommon.ValueTypeString: rec[k] = v.StringVal() - case pdata.ValueTypeInt: + case pcommon.ValueTypeInt: rec[k] = strconv.FormatInt(v.IntVal(), 10) - case pdata.ValueTypeDouble: + case pcommon.ValueTypeDouble: rec[k] = strconv.FormatFloat(v.DoubleVal(), 'f', -1, 64) - case pdata.ValueTypeBool: + case pcommon.ValueTypeBool: rec[k] = strconv.FormatBool(v.BoolVal()) default: panic("missing case") diff --git a/testbed/datasenders/k8s.go b/testbed/datasenders/k8s.go index 1887951a0b42..ccab7d48ab91 100644 --- a/testbed/datasenders/k8s.go +++ b/testbed/datasenders/k8s.go @@ -25,7 +25,8 @@ import ( "time" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" "github.com/open-telemetry/opentelemetry-collector-contrib/testbed/testbed" ) @@ -89,7 +90,7 @@ func (f *FileLogK8sWriter) Start() error { return nil } -func (f *FileLogK8sWriter) ConsumeLogs(_ context.Context, logs pdata.Logs) error { +func (f *FileLogK8sWriter) ConsumeLogs(_ context.Context, logs plog.Logs) error { for i := 0; i < logs.ResourceLogs().Len(); i++ { for j := 0; j < logs.ResourceLogs().At(i).ScopeLogs().Len(); j++ { ills := logs.ResourceLogs().At(i).ScopeLogs().At(j) @@ -104,7 +105,7 @@ func (f *FileLogK8sWriter) ConsumeLogs(_ context.Context, logs pdata.Logs) error return nil } -func (f *FileLogK8sWriter) convertLogToTextLine(lr pdata.LogRecord) []byte { +func (f *FileLogK8sWriter) convertLogToTextLine(lr plog.LogRecord) []byte { sb := strings.Builder{} // Timestamp @@ -115,22 +116,22 @@ func (f *FileLogK8sWriter) convertLogToTextLine(lr pdata.LogRecord) []byte { sb.WriteString(lr.SeverityText()) sb.WriteString(" ") - if lr.Body().Type() == pdata.ValueTypeString { + if lr.Body().Type() == pcommon.ValueTypeString { sb.WriteString(lr.Body().StringVal()) } - lr.Attributes().Range(func(k string, v pdata.Value) bool { + lr.Attributes().Range(func(k string, v pcommon.Value) bool { sb.WriteString(" ") sb.WriteString(k) sb.WriteString("=") switch v.Type() { - case pdata.ValueTypeString: + case pcommon.ValueTypeString: sb.WriteString(v.StringVal()) - case pdata.ValueTypeInt: + case pcommon.ValueTypeInt: sb.WriteString(strconv.FormatInt(v.IntVal(), 10)) - case pdata.ValueTypeDouble: + case pcommon.ValueTypeDouble: sb.WriteString(strconv.FormatFloat(v.DoubleVal(), 'f', -1, 64)) - case pdata.ValueTypeBool: + case pcommon.ValueTypeBool: sb.WriteString(strconv.FormatBool(v.BoolVal())) default: panic("missing case") diff --git a/testbed/datasenders/stanza.go 
b/testbed/datasenders/stanza.go index d8c312dee99c..37d2a8977a32 100644 --- a/testbed/datasenders/stanza.go +++ b/testbed/datasenders/stanza.go @@ -25,7 +25,8 @@ import ( "time" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" "github.com/open-telemetry/opentelemetry-collector-contrib/testbed/testbed" ) @@ -63,7 +64,7 @@ func (f *FileLogWriter) Start() error { return nil } -func (f *FileLogWriter) ConsumeLogs(_ context.Context, logs pdata.Logs) error { +func (f *FileLogWriter) ConsumeLogs(_ context.Context, logs plog.Logs) error { for i := 0; i < logs.ResourceLogs().Len(); i++ { for j := 0; j < logs.ResourceLogs().At(i).ScopeLogs().Len(); j++ { ills := logs.ResourceLogs().At(i).ScopeLogs().At(j) @@ -78,7 +79,7 @@ func (f *FileLogWriter) ConsumeLogs(_ context.Context, logs pdata.Logs) error { return nil } -func (f *FileLogWriter) convertLogToTextLine(lr pdata.LogRecord) []byte { +func (f *FileLogWriter) convertLogToTextLine(lr plog.LogRecord) []byte { sb := strings.Builder{} // Timestamp @@ -89,22 +90,22 @@ func (f *FileLogWriter) convertLogToTextLine(lr pdata.LogRecord) []byte { sb.WriteString(lr.SeverityText()) sb.WriteString(" ") - if lr.Body().Type() == pdata.ValueTypeString { + if lr.Body().Type() == pcommon.ValueTypeString { sb.WriteString(lr.Body().StringVal()) } - lr.Attributes().Range(func(k string, v pdata.Value) bool { + lr.Attributes().Range(func(k string, v pcommon.Value) bool { sb.WriteString(" ") sb.WriteString(k) sb.WriteString("=") switch v.Type() { - case pdata.ValueTypeString: + case pcommon.ValueTypeString: sb.WriteString(v.StringVal()) - case pdata.ValueTypeInt: + case pcommon.ValueTypeInt: sb.WriteString(strconv.FormatInt(v.IntVal(), 10)) - case pdata.ValueTypeDouble: + case pcommon.ValueTypeDouble: sb.WriteString(strconv.FormatFloat(v.DoubleVal(), 'f', -1, 64)) - case pdata.ValueTypeBool: + case pcommon.ValueTypeBool: sb.WriteString(strconv.FormatBool(v.BoolVal())) default: panic("missing case") diff --git a/testbed/datasenders/syslog.go b/testbed/datasenders/syslog.go index 5d8bb23ab098..eaebfb0f0bca 100644 --- a/testbed/datasenders/syslog.go +++ b/testbed/datasenders/syslog.go @@ -23,7 +23,8 @@ import ( "time" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" "github.com/open-telemetry/opentelemetry-collector-contrib/testbed/testbed" ) @@ -75,7 +76,7 @@ func (f *SyslogWriter) Start() (err error) { return err } -func (f *SyslogWriter) ConsumeLogs(_ context.Context, logs pdata.Logs) error { +func (f *SyslogWriter) ConsumeLogs(_ context.Context, logs plog.Logs) error { for i := 0; i < logs.ResourceLogs().Len(); i++ { for j := 0; j < logs.ResourceLogs().At(i).ScopeLogs().Len(); j++ { ills := logs.ResourceLogs().At(i).ScopeLogs().At(j) @@ -98,13 +99,13 @@ func (f *SyslogWriter) GenConfigYAMLStr() string { listen_address: "%s" `, f.network, f.GetEndpoint()) } -func (f *SyslogWriter) Send(lr pdata.LogRecord) error { +func (f *SyslogWriter) Send(lr plog.LogRecord) error { ts := time.Unix(int64(lr.Timestamp()/1000000000), int64(lr.Timestamp()%100000000)).Format(time.RFC3339Nano) sdid := strings.Builder{} sdid.WriteString(fmt.Sprintf("%s=\"%s\" ", "trace_id", lr.TraceID().HexString())) sdid.WriteString(fmt.Sprintf("%s=\"%s\" ", "span_id", lr.SpanID().HexString())) sdid.WriteString(fmt.Sprintf("%s=\"%d\" 
", "trace_flags", lr.Flags())) - lr.Attributes().Range(func(k string, v pdata.Value) bool { + lr.Attributes().Range(func(k string, v pcommon.Value) bool { sdid.WriteString(fmt.Sprintf("%s=\"%s\" ", k, v.StringVal())) return true }) diff --git a/testbed/datasenders/tcpudp.go b/testbed/datasenders/tcpudp.go index 28a119453d36..b93505f3f545 100644 --- a/testbed/datasenders/tcpudp.go +++ b/testbed/datasenders/tcpudp.go @@ -23,7 +23,8 @@ import ( "time" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" "github.com/open-telemetry/opentelemetry-collector-contrib/testbed/testbed" ) @@ -75,7 +76,7 @@ func (f *TCPUDPWriter) Start() (err error) { return err } -func (f *TCPUDPWriter) ConsumeLogs(_ context.Context, logs pdata.Logs) error { +func (f *TCPUDPWriter) ConsumeLogs(_ context.Context, logs plog.Logs) error { for i := 0; i < logs.ResourceLogs().Len(); i++ { for j := 0; j < logs.ResourceLogs().At(i).ScopeLogs().Len(); j++ { ills := logs.ResourceLogs().At(i).ScopeLogs().At(j) @@ -96,13 +97,13 @@ func (f *TCPUDPWriter) GenConfigYAMLStr() string { listen_address: "%s" `, f.network, f.GetEndpoint()) } -func (f *TCPUDPWriter) Send(lr pdata.LogRecord) error { +func (f *TCPUDPWriter) Send(lr plog.LogRecord) error { ts := time.Unix(int64(lr.Timestamp()/1000000000), int64(lr.Timestamp()%100000000)).Format(time.RFC3339Nano) sdid := strings.Builder{} sdid.WriteString(fmt.Sprintf("%s=\"%s\" ", "trace_id", lr.TraceID().HexString())) sdid.WriteString(fmt.Sprintf("%s=\"%s\" ", "span_id", lr.SpanID().HexString())) sdid.WriteString(fmt.Sprintf("%s=\"%d\" ", "trace_flags", lr.Flags())) - lr.Attributes().Range(func(k string, v pdata.Value) bool { + lr.Attributes().Range(func(k string, v pcommon.Value) bool { sdid.WriteString(fmt.Sprintf("%s=\"%s\" ", k, v.StringVal())) return true }) diff --git a/testbed/go.mod b/testbed/go.mod index 4a4b6fe30806..f957542b70fb 100644 --- a/testbed/go.mod +++ b/testbed/go.mod @@ -26,8 +26,8 @@ require ( github.com/prometheus/prometheus v1.8.2-0.20220324155304-4d8bbfd4164c github.com/shirou/gopsutil/v3 v3.22.3 github.com/stretchr/testify v1.7.1 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d go.uber.org/atomic v1.9.0 go.uber.org/zap v1.21.0 golang.org/x/text v0.3.7 @@ -35,7 +35,7 @@ require ( require ( cloud.google.com/go/compute v1.5.0 // indirect - contrib.go.opencensus.io/exporter/prometheus v0.4.0 // indirect + contrib.go.opencensus.io/exporter/prometheus v0.4.1 // indirect github.com/Azure/azure-sdk-for-go v62.0.0+incompatible // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect github.com/Azure/go-autorest/autorest v0.11.24 // indirect @@ -51,7 +51,7 @@ require ( github.com/armon/go-metrics v0.3.10 // indirect github.com/aws/aws-sdk-go v1.43.32 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/cenkalti/backoff/v4 v4.1.2 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/census-instrumentation/opencensus-proto v0.3.0 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490 // indirect @@ -109,7 +109,7 @@ require ( 
github.com/jpillora/backoff v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/compress v1.15.1 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b // indirect github.com/linode/linodego v1.3.0 // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect @@ -218,6 +218,7 @@ require ( require ( github.com/go-kit/kit v0.12.0 // indirect + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/multierr v1.8.0 ) @@ -280,3 +281,5 @@ replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator replace github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal => ../internal/coreinternal replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry => ../pkg/resourcetotelemetry + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/testbed/go.sum b/testbed/go.sum index 4b0ba79af654..a69e82832469 100644 --- a/testbed/go.sum +++ b/testbed/go.sum @@ -54,8 +54,8 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= code.cloudfoundry.org/bytefmt v0.0.0-20190710193110-1eb035ffe2b6/go.mod h1:wN/zk7mhREp/oviagqUXY3EwuHhWyOvAdsn5Y4CzOrc= -contrib.go.opencensus.io/exporter/prometheus v0.4.0 h1:0QfIkj9z/iVZgK31D9H9ohjjIDApI2GOPScCKwxedbs= -contrib.go.opencensus.io/exporter/prometheus v0.4.0/go.mod h1:o7cosnyfuPVK0tB8q0QmaQNhGnptITnPQB+z1+qeFB0= +contrib.go.opencensus.io/exporter/prometheus v0.4.1 h1:oObVeKo2NxpdF/fIfrPsNj6K0Prg0R0mHM+uANlYMiM= +contrib.go.opencensus.io/exporter/prometheus v0.4.1/go.mod h1:t9wvfitlUjGXG2IXAZsuFq26mDGid/JwCEXp+gTG/9U= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/AdaLogics/go-fuzz-headers v0.0.0-20210715213245-6c3934b029d8/go.mod h1:CzsSbkDixRphAF5hS6wbMKq0eI6ccJRb7/A0M6JBnwg= github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= @@ -221,8 +221,9 @@ github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3k github.com/casbin/casbin/v2 v2.31.6/go.mod h1:vByNa/Fchek0KZUgG5wEsl7iFsiviAYKRtgrQfcJqHg= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.3.0 h1:t/LhUZLVitR1Ow2YOnduCsavhwFUklBMoGVYUCqmCqk= 
github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -996,8 +997,8 @@ github.com/klauspost/compress v1.14.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47e github.com/klauspost/compress v1.14.4/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b h1:iNjcivnc6lhbvJA3LD622NPrUponluJrBWPIwGG/3Bg= github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -1268,6 +1269,7 @@ github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQ github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= @@ -1355,7 +1357,6 @@ github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvW github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shirou/gopsutil v2.18.10+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= -github.com/shirou/gopsutil/v3 v3.22.2/go.mod h1:WapW1AOOPlHyXr+yOyw3uYx36enocrtSoSBy0L5vUHY= github.com/shirou/gopsutil/v3 v3.22.3 h1:UebRzEomgMpv61e3hgD1tGooqX5trFbdU/ehphbHd00= github.com/shirou/gopsutil/v3 v3.22.3/go.mod h1:D01hZJ4pVHPpCTZ3m3T2+wDF2YAGfd+H4ifUguaQzHM= github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= @@ -1462,10 +1463,8 @@ github.com/tidwall/tinylru v1.1.0 h1:XY6IUfzVTU9rpwdhKUF6nQdChgCdGjkMfLzbWyiau6I github.com/tidwall/wal v1.1.7 h1:emc1TRjIVsdKKSnpwGBAcsAGg0767SvUk8+ygx7Bb+4= github.com/tinylib/msgp v1.1.0 h1:9fQd+ICuRIu/ue4vxJZu6/LzxN0HwMds2nq/0cFvxHU= github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= -github.com/tklauser/go-sysconf v0.3.9/go.mod h1:11DU/5sG7UexIrp/O6g35hrWzu0JxlwQ3LSFUzyeuhs= github.com/tklauser/go-sysconf v0.3.10 
h1:IJ1AZGZRWbY8T5Vfk04D9WOA5WSejdflXxP03OUqALw= github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk= -github.com/tklauser/numcpus v0.3.0/go.mod h1:yFGUr7TUHQRAhyqBcEg0Ge34zDBAsIvJJcyE6boqnA8= github.com/tklauser/numcpus v0.4.0 h1:E53Dm1HjH1/R2/aoCtXtPgzmElmn51aOkhCFSuZq//o= github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -1560,13 +1559,16 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= go.opentelemetry.io/collector/model v0.44.0/go.mod h1:4jo1R8uBDspLCxUGhQ0k3v/EFXFbW7s0AIy3LuGLbcU= go.opentelemetry.io/collector/model v0.45.0/go.mod h1:uyiyyq8lV45zrJ94MnLip26sorfNLP6J9XmOvaEmy7w= go.opentelemetry.io/collector/model v0.46.0/go.mod h1:uyiyyq8lV45zrJ94MnLip26sorfNLP6J9XmOvaEmy7w= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d h1:KPtuYJw20fHuzdyEwUo/ON8v2MQaq64LMXMC4G815CA= +go.opentelemetry.io/collector/model v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:MWV2yf/tXWdSrkHex1MHeye+UkM7OgiZd2dqPws2Hes= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.28.0/go.mod h1:vEhqr0m4eTc+DWxfsXoXue2GBgV2uUwVznkGIHW/e5w= @@ -1595,7 +1597,6 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.3.0/go.mod h1 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.4.1/go.mod h1:c6E4V3/U+miqjs/8l950wggHGL1qzlp0Ypj9xoGrPqo= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.3.0/go.mod h1:QNX1aly8ehqqX1LEa6YniTU7VY9I6R3X/oPxhGdTceE= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.4.1/go.mod h1:VwYo0Hak6Efuy0TXsZs8o1hnV3dHDPNtDbycG0hI8+M= -go.opentelemetry.io/otel/exporters/prometheus v0.28.0/go.mod h1:nN2uGmk/rLmcbPTaZakIMqYH2Q0T8V1sOnKOHe/HLH0= go.opentelemetry.io/otel/exporters/prometheus v0.29.0 h1:jOrFr8pCPj52GCPNq3qd69SEug3QmqDJTzbrefUxkpw= go.opentelemetry.io/otel/exporters/prometheus v0.29.0/go.mod h1:Er2VVJQZbHysogooLNchdZ3MLYoI7+d15mHmrRlRJCU= go.opentelemetry.io/otel/internal/metric v0.27.0/go.mod 
h1:n1CVxRqKqYZtqyTh9U/onvKapPGv7y/rpyOTI+LFNzw= @@ -1608,13 +1609,11 @@ go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= go.opentelemetry.io/otel/sdk v1.3.0/go.mod h1:rIo4suHNhQwBIPg9axF8V9CA72Wz2mKF1teNrup8yzs= go.opentelemetry.io/otel/sdk v1.4.1/go.mod h1:NBwHDgDIBYjwK2WNu1OPgsIc2IJzmBXNnvIJxJc8BpE= -go.opentelemetry.io/otel/sdk v1.6.0/go.mod h1:PjLRUfDsoPy0zl7yrDGSUqjj43tL7rEtFdCEiGlxXRM= go.opentelemetry.io/otel/sdk v1.6.1/go.mod h1:IVYrddmFZ+eJqu2k38qD3WezFR2pymCzm8tdxyh3R4E= go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/sdk v1.6.3/go.mod h1:A4iWF7HTXa+GWL/AaqESz28VuSBIcZ+0CV+IzJ5NMiQ= go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE= go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE= -go.opentelemetry.io/otel/sdk/metric v0.28.0/go.mod h1:DqJmT0ovBgoW6TJ8CAQyTnwxZPIp3KWtCiDDZ1uHAzU= go.opentelemetry.io/otel/sdk/metric v0.29.0 h1:OCEp2igPFXQrGxSR/nwd/bDjkPlPlOVjIULA/ob0dNw= go.opentelemetry.io/otel/sdk/metric v0.29.0/go.mod h1:IFkFNKI8Gq8zBdqOKdODCL9+LInBZLXaGpqSIKphNuU= go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= @@ -1944,7 +1943,6 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1960,7 +1958,6 @@ golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220111092808-5a964db01320/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= diff --git a/testbed/mockdatareceivers/mockawsxrayreceiver/go.mod b/testbed/mockdatareceivers/mockawsxrayreceiver/go.mod index 16099e9cb1e1..9244ef390eff 100644 --- a/testbed/mockdatareceivers/mockawsxrayreceiver/go.mod +++ b/testbed/mockdatareceivers/mockawsxrayreceiver/go.mod @@ -4,8 +4,8 @@ go 1.17 require ( github.com/gorilla/mux v1.8.0 - go.opentelemetry.io/collector v0.48.0 - go.opentelemetry.io/collector/model v0.48.0 + go.opentelemetry.io/collector 
v0.48.1-0.20220412005140-8eb68f40028d + go.opentelemetry.io/collector/pdata v0.0.0-00010101000000-000000000000 go.uber.org/zap v1.21.0 ) @@ -13,13 +13,12 @@ require ( require ( github.com/fsnotify/fsnotify v1.5.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/knadh/koanf v1.4.0 // indirect + github.com/knadh/koanf v1.4.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/pelletier/go-toml v1.9.4 // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/spf13/cast v1.4.1 // indirect go.opencensus.io v0.23.0 // indirect go.opentelemetry.io/otel v1.6.3 // indirect go.opentelemetry.io/otel/metric v0.29.0 // indirect @@ -27,3 +26,5 @@ require ( go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect ) + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d diff --git a/testbed/mockdatareceivers/mockawsxrayreceiver/go.sum b/testbed/mockdatareceivers/mockawsxrayreceiver/go.sum index 9c6f96a672e4..7d0dfc27cfb2 100644 --- a/testbed/mockdatareceivers/mockawsxrayreceiver/go.sum +++ b/testbed/mockdatareceivers/mockawsxrayreceiver/go.sum @@ -95,8 +95,8 @@ github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= -github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= +github.com/knadh/koanf v1.4.1 h1:Z0VGW/uo8NJmjd+L1Dc3S5frq6c62w5xQ9Yf4Mg3wFQ= +github.com/knadh/koanf v1.4.1/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= @@ -134,8 +134,6 @@ github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1: github.com/rhnvrm/simples3 v0.6.1/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -150,15 +148,15 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= 
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opentelemetry.io/collector v0.48.0 h1:/kUmNzsYgdPmbdscOGtCFPyZvxICrzmCFth2krzJuWs= -go.opentelemetry.io/collector v0.48.0/go.mod h1:iklh3+Npx1DalC6PvEi9ysjx9zLbjgOUQFTIh2MufQU= -go.opentelemetry.io/collector/model v0.48.0 h1:xmN4LdZ92q6PZnaKhMdIlC5KGtPJeOYaWCnA1PQ2oZw= -go.opentelemetry.io/collector/model v0.48.0/go.mod h1:1QVYv8TqsTMt9wVC5BUF9fqMVtk2C5EclWDnuVqdKoU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d h1:zJ2eiD3yIZpEUPej8YmhDKA/K1YUrT+PQ7QO4tOyHuU= +go.opentelemetry.io/collector v0.48.1-0.20220412005140-8eb68f40028d/go.mod h1:gQKSqJkMeVZa/GeH8AOPS97ivf0EvrYTWduS+9jDCsI= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d h1:zNEwnAiGrOhX6s4KQ3Nl9Cz6mCcMbDVvD+z+AWM8F3k= +go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d/go.mod h1:YwmKuiFhNgtmhRdpi8Q8FAWPa0AwJTCSlssSsAtuRcY= go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE= go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI= go.opentelemetry.io/otel/metric v0.29.0 h1:7unM/I13Dbc1VHw8lTPQ7zfNIgkhcb8BZhujXOS4jKc= go.opentelemetry.io/otel/metric v0.29.0/go.mod h1:HahKFp1OC1RNTsuO/HNMBHHJR+dmHZ7wLARRgGDwjLQ= -go.opentelemetry.io/otel/sdk v1.6.1 h1:ZmcNyMhcuAYIb/Nr6QhBPTMopMTbov/47wHt1gibkoY= +go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs= go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc= go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -214,7 +212,7 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= diff --git a/testbed/mockdatareceivers/mockawsxrayreceiver/trace_receiver.go b/testbed/mockdatareceivers/mockawsxrayreceiver/trace_receiver.go index 6cd8cf557bd8..fd2f265cb95e 100644 --- a/testbed/mockdatareceivers/mockawsxrayreceiver/trace_receiver.go +++ b/testbed/mockdatareceivers/mockawsxrayreceiver/trace_receiver.go @@ -28,8 +28,8 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/component/componenterror" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/model/pdata" "go.opentelemetry.io/collector/obsreport" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/zap" ) @@ -134,7 +134,7 @@ func (ar *MockAwsXrayReceiver) Shutdown(context.Context) error { return ar.server.Close() } -func ToTraces(rawSeg []byte) (*pdata.Traces, error) { +func ToTraces(rawSeg []byte) (*ptrace.Traces, error) { var result map[string]interface{} err := 
json.Unmarshal(rawSeg, &result) if err != nil { @@ -146,7 +146,7 @@ func ToTraces(rawSeg []byte) (*pdata.Traces, error) { panic("Not a slice") } - traceData := pdata.NewTraces() + traceData := ptrace.NewTraces() rspan := traceData.ResourceSpans().AppendEmpty() ils := rspan.ScopeSpans().AppendEmpty() ils.Spans().EnsureCapacity(len(records)) diff --git a/testbed/testbed/data_providers.go b/testbed/testbed/data_providers.go index 75ad3a0cdb6a..cd682576c53a 100644 --- a/testbed/testbed/data_providers.go +++ b/testbed/testbed/data_providers.go @@ -23,8 +23,10 @@ import ( "time" "go.opentelemetry.io/collector/config" - "go.opentelemetry.io/collector/model/otlp" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/atomic" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/goldendataset" @@ -37,11 +39,11 @@ type DataProvider interface { // The data provider implementation should increment these as it generates data. SetLoadGeneratorCounters(dataItemsGenerated *atomic.Uint64) // GenerateTraces returns an internal Traces instance with an OTLP ResourceSpans slice populated with test data. - GenerateTraces() (pdata.Traces, bool) + GenerateTraces() (ptrace.Traces, bool) // GenerateMetrics returns an internal MetricData instance with an OTLP ResourceMetrics slice of test data. - GenerateMetrics() (pdata.Metrics, bool) - // GenerateLogs returns the internal pdata.Logs format - GenerateLogs() (pdata.Logs, bool) + GenerateMetrics() (pmetric.Metrics, bool) + // GenerateLogs returns the internal plog.Logs format + GenerateLogs() (plog.Logs, bool) } // perfTestDataProvider in an implementation of the DataProvider for use in performance tests. @@ -64,8 +66,8 @@ func (dp *perfTestDataProvider) SetLoadGeneratorCounters(dataItemsGenerated *ato dp.dataItemsGenerated = dataItemsGenerated } -func (dp *perfTestDataProvider) GenerateTraces() (pdata.Traces, bool) { - traceData := pdata.NewTraces() +func (dp *perfTestDataProvider) GenerateTraces() (ptrace.Traces, bool) { + traceData := ptrace.NewTraces() spans := traceData.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty().Spans() spans.EnsureCapacity(dp.options.ItemsPerBatch) @@ -83,7 +85,7 @@ func (dp *perfTestDataProvider) GenerateTraces() (pdata.Traces, bool) { span.SetTraceID(idutils.UInt64ToTraceID(0, traceID)) span.SetSpanID(idutils.UInt64ToSpanID(spanID)) span.SetName("load-generator-span") - span.SetKind(pdata.SpanKindClient) + span.SetKind(ptrace.SpanKindClient) attrs := span.Attributes() attrs.UpsertInt("load_generator.span_seq_num", int64(spanID)) attrs.UpsertInt("load_generator.trace_seq_num", int64(traceID)) @@ -91,17 +93,17 @@ func (dp *perfTestDataProvider) GenerateTraces() (pdata.Traces, bool) { for k, v := range dp.options.Attributes { attrs.UpsertString(k, v) } - span.SetStartTimestamp(pdata.NewTimestampFromTime(startTime)) - span.SetEndTimestamp(pdata.NewTimestampFromTime(endTime)) + span.SetStartTimestamp(pcommon.NewTimestampFromTime(startTime)) + span.SetEndTimestamp(pcommon.NewTimestampFromTime(endTime)) } return traceData, false } -func (dp *perfTestDataProvider) GenerateMetrics() (pdata.Metrics, bool) { +func (dp *perfTestDataProvider) GenerateMetrics() (pmetric.Metrics, bool) { // Generate 7 data points per metric. 
const dataPointsPerMetric = 7 - md := pdata.NewMetrics() + md := pmetric.NewMetrics() rm := md.ResourceMetrics().AppendEmpty() if dp.options.Attributes != nil { attrs := rm.Resource().Attributes() @@ -118,7 +120,7 @@ func (dp *perfTestDataProvider) GenerateMetrics() (pdata.Metrics, bool) { metric.SetName("load_generator_" + strconv.Itoa(i)) metric.SetDescription("Load Generator Counter #" + strconv.Itoa(i)) metric.SetUnit("1") - metric.SetDataType(pdata.MetricDataTypeGauge) + metric.SetDataType(pmetric.MetricDataTypeGauge) batchIndex := dp.traceIDSequence.Inc() @@ -127,7 +129,7 @@ func (dp *perfTestDataProvider) GenerateMetrics() (pdata.Metrics, bool) { dps.EnsureCapacity(dataPointsPerMetric) for j := 0; j < dataPointsPerMetric; j++ { dataPoint := dps.AppendEmpty() - dataPoint.SetStartTimestamp(pdata.NewTimestampFromTime(time.Now())) + dataPoint.SetStartTimestamp(pcommon.NewTimestampFromTime(time.Now())) value := dp.dataItemsGenerated.Inc() dataPoint.SetIntVal(int64(value)) dataPoint.Attributes().InsertString("item_index", "item_"+strconv.Itoa(j)) @@ -137,8 +139,8 @@ func (dp *perfTestDataProvider) GenerateMetrics() (pdata.Metrics, bool) { return md, false } -func (dp *perfTestDataProvider) GenerateLogs() (pdata.Logs, bool) { - logs := pdata.NewLogs() +func (dp *perfTestDataProvider) GenerateLogs() (plog.Logs, bool) { + logs := plog.NewLogs() rl := logs.ResourceLogs().AppendEmpty() if dp.options.Attributes != nil { attrs := rl.Resource().Attributes() @@ -150,14 +152,14 @@ func (dp *perfTestDataProvider) GenerateLogs() (pdata.Logs, bool) { logRecords := rl.ScopeLogs().AppendEmpty().LogRecords() logRecords.EnsureCapacity(dp.options.ItemsPerBatch) - now := pdata.NewTimestampFromTime(time.Now()) + now := pcommon.NewTimestampFromTime(time.Now()) batchIndex := dp.traceIDSequence.Inc() for i := 0; i < dp.options.ItemsPerBatch; i++ { itemIndex := dp.dataItemsGenerated.Inc() record := logRecords.AppendEmpty() - record.SetSeverityNumber(pdata.SeverityNumberINFO3) + record.SetSeverityNumber(plog.SeverityNumberINFO3) record.SetSeverityText("INFO3") record.SetName("load_generator_" + strconv.Itoa(i)) record.Body().SetStringVal("Load Generator Counter #" + strconv.Itoa(i)) @@ -182,11 +184,11 @@ type goldenDataProvider struct { spanPairsFile string dataItemsGenerated *atomic.Uint64 - tracesGenerated []pdata.Traces + tracesGenerated []ptrace.Traces tracesIndex int metricPairsFile string - metricsGenerated []pdata.Metrics + metricsGenerated []pmetric.Metrics metricsIndex int } @@ -204,7 +206,7 @@ func (dp *goldenDataProvider) SetLoadGeneratorCounters(dataItemsGenerated *atomi dp.dataItemsGenerated = dataItemsGenerated } -func (dp *goldenDataProvider) GenerateTraces() (pdata.Traces, bool) { +func (dp *goldenDataProvider) GenerateTraces() (ptrace.Traces, bool) { if dp.tracesGenerated == nil { var err error dp.tracesGenerated, err = goldendataset.GenerateTraces(dp.tracePairsFile, dp.spanPairsFile) @@ -214,7 +216,7 @@ func (dp *goldenDataProvider) GenerateTraces() (pdata.Traces, bool) { } } if dp.tracesIndex >= len(dp.tracesGenerated) { - return pdata.NewTraces(), true + return ptrace.NewTraces(), true } td := dp.tracesGenerated[dp.tracesIndex] dp.tracesIndex++ @@ -222,7 +224,7 @@ func (dp *goldenDataProvider) GenerateTraces() (pdata.Traces, bool) { return td, false } -func (dp *goldenDataProvider) GenerateMetrics() (pdata.Metrics, bool) { +func (dp *goldenDataProvider) GenerateMetrics() (pmetric.Metrics, bool) { if dp.metricsGenerated == nil { var err error dp.metricsGenerated, err = 
goldendataset.GenerateMetrics(dp.metricPairsFile) @@ -231,7 +233,7 @@ func (dp *goldenDataProvider) GenerateMetrics() (pdata.Metrics, bool) { } } if dp.metricsIndex == len(dp.metricsGenerated) { - return pdata.Metrics{}, true + return pmetric.Metrics{}, true } pdm := dp.metricsGenerated[dp.metricsIndex] dp.metricsIndex++ @@ -239,8 +241,8 @@ func (dp *goldenDataProvider) GenerateMetrics() (pdata.Metrics, bool) { return pdm, false } -func (dp *goldenDataProvider) GenerateLogs() (pdata.Logs, bool) { - return pdata.NewLogs(), true +func (dp *goldenDataProvider) GenerateLogs() (plog.Logs, bool) { + return plog.NewLogs(), true } // FileDataProvider in an implementation of the DataProvider for use in performance tests. @@ -250,9 +252,9 @@ func (dp *goldenDataProvider) GenerateLogs() (pdata.Logs, bool) { // expects just a single JSON message in the entire file). type FileDataProvider struct { dataItemsGenerated *atomic.Uint64 - logs pdata.Logs - metrics pdata.Metrics - traces pdata.Traces + logs plog.Logs + metrics pmetric.Metrics + traces ptrace.Traces ItemsPerBatch int } @@ -273,17 +275,17 @@ func NewFileDataProvider(filePath string, dataType config.DataType) (*FileDataPr // Load the message from the file and count the data points. switch dataType { case config.TracesDataType: - if dp.traces, err = otlp.NewJSONTracesUnmarshaler().UnmarshalTraces(buf); err != nil { + if dp.traces, err = ptrace.NewJSONUnmarshaler().UnmarshalTraces(buf); err != nil { return nil, err } dp.ItemsPerBatch = dp.traces.SpanCount() case config.MetricsDataType: - if dp.metrics, err = otlp.NewJSONMetricsUnmarshaler().UnmarshalMetrics(buf); err != nil { + if dp.metrics, err = pmetric.NewJSONUnmarshaler().UnmarshalMetrics(buf); err != nil { return nil, err } dp.ItemsPerBatch = dp.metrics.DataPointCount() case config.LogsDataType: - if dp.logs, err = otlp.NewJSONLogsUnmarshaler().UnmarshalLogs(buf); err != nil { + if dp.logs, err = plog.NewJSONUnmarshaler().UnmarshalLogs(buf); err != nil { return nil, err } dp.ItemsPerBatch = dp.logs.LogRecordCount() @@ -296,17 +298,17 @@ func (dp *FileDataProvider) SetLoadGeneratorCounters(dataItemsGenerated *atomic. 
dp.dataItemsGenerated = dataItemsGenerated } -func (dp *FileDataProvider) GenerateTraces() (pdata.Traces, bool) { +func (dp *FileDataProvider) GenerateTraces() (ptrace.Traces, bool) { dp.dataItemsGenerated.Add(uint64(dp.ItemsPerBatch)) return dp.traces, false } -func (dp *FileDataProvider) GenerateMetrics() (pdata.Metrics, bool) { +func (dp *FileDataProvider) GenerateMetrics() (pmetric.Metrics, bool) { dp.dataItemsGenerated.Add(uint64(dp.ItemsPerBatch)) return dp.metrics, false } -func (dp *FileDataProvider) GenerateLogs() (pdata.Logs, bool) { +func (dp *FileDataProvider) GenerateLogs() (plog.Logs, bool) { dp.dataItemsGenerated.Add(uint64(dp.ItemsPerBatch)) return dp.logs, false } diff --git a/testbed/testbed/data_providers_test.go b/testbed/testbed/data_providers_test.go index 7bd5b2d7474d..9d7cd394a587 100644 --- a/testbed/testbed/data_providers_test.go +++ b/testbed/testbed/data_providers_test.go @@ -18,7 +18,7 @@ import ( "testing" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/atomic" ) @@ -27,7 +27,7 @@ const metricsPictPairsFile = "../../internal/goldendataset/testdata/generated_pi func TestGoldenDataProvider(t *testing.T) { dp := NewGoldenDataProvider("", "", metricsPictPairsFile) dp.SetLoadGeneratorCounters(atomic.NewUint64(0)) - var ms []pdata.Metrics + var ms []pmetric.Metrics for { m, done := dp.GenerateMetrics() if done { diff --git a/testbed/testbed/in_process_collector.go b/testbed/testbed/in_process_collector.go index 264af055bc75..55dc44d7403e 100644 --- a/testbed/testbed/in_process_collector.go +++ b/testbed/testbed/in_process_collector.go @@ -68,9 +68,10 @@ func (ipp *inProcessCollector) Start(args StartParams) error { ipp.configFile = confFile.Name() settings := service.CollectorSettings{ - BuildInfo: component.NewDefaultBuildInfo(), - Factories: ipp.factories, - ConfigProvider: service.MustNewDefaultConfigProvider([]string{ipp.configFile}, nil), + BuildInfo: component.NewDefaultBuildInfo(), + Factories: ipp.factories, + // TODO: Replace with NewConfigProvider + ConfigProvider: service.MustNewDefaultConfigProvider([]string{ipp.configFile}, nil), // nolint:staticcheck } ipp.svc, err = service.New(settings) diff --git a/testbed/testbed/mock_backend.go b/testbed/testbed/mock_backend.go index c6ae9238a355..f4c37f7daffc 100644 --- a/testbed/testbed/mock_backend.go +++ b/testbed/testbed/mock_backend.go @@ -22,7 +22,9 @@ import ( "time" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/atomic" ) @@ -47,9 +49,9 @@ type MockBackend struct { // Recording fields. isRecording bool recordMutex sync.Mutex - ReceivedTraces []pdata.Traces - ReceivedMetrics []pdata.Metrics - ReceivedLogs []pdata.Logs + ReceivedTraces []ptrace.Traces + ReceivedMetrics []pmetric.Metrics + ReceivedLogs []plog.Logs } // NewMockBackend creates a new mock backend that receives data using specified receiver. 
@@ -134,7 +136,7 @@ func (mb *MockBackend) ClearReceivedItems() { mb.ReceivedLogs = nil } -func (mb *MockBackend) ConsumeTrace(td pdata.Traces) { +func (mb *MockBackend) ConsumeTrace(td ptrace.Traces) { mb.recordMutex.Lock() defer mb.recordMutex.Unlock() if mb.isRecording { @@ -142,7 +144,7 @@ func (mb *MockBackend) ConsumeTrace(td pdata.Traces) { } } -func (mb *MockBackend) ConsumeMetric(md pdata.Metrics) { +func (mb *MockBackend) ConsumeMetric(md pmetric.Metrics) { mb.recordMutex.Lock() defer mb.recordMutex.Unlock() if mb.isRecording { @@ -152,7 +154,7 @@ func (mb *MockBackend) ConsumeMetric(md pdata.Metrics) { var _ consumer.Traces = (*MockTraceConsumer)(nil) -func (mb *MockBackend) ConsumeLogs(ld pdata.Logs) { +func (mb *MockBackend) ConsumeLogs(ld plog.Logs) { mb.recordMutex.Lock() defer mb.recordMutex.Unlock() if mb.isRecording { @@ -169,7 +171,7 @@ func (tc *MockTraceConsumer) Capabilities() consumer.Capabilities { return consumer.Capabilities{MutatesData: false} } -func (tc *MockTraceConsumer) ConsumeTraces(_ context.Context, td pdata.Traces) error { +func (tc *MockTraceConsumer) ConsumeTraces(_ context.Context, td ptrace.Traces) error { tc.numSpansReceived.Add(uint64(td.SpanCount())) rs := td.ResourceSpans() @@ -216,7 +218,7 @@ func (mc *MockMetricConsumer) Capabilities() consumer.Capabilities { return consumer.Capabilities{MutatesData: false} } -func (mc *MockMetricConsumer) ConsumeMetrics(_ context.Context, md pdata.Metrics) error { +func (mc *MockMetricConsumer) ConsumeMetrics(_ context.Context, md pmetric.Metrics) error { mc.numMetricsReceived.Add(uint64(md.DataPointCount())) mc.backend.ConsumeMetric(md) return nil @@ -241,7 +243,7 @@ func (lc *MockLogConsumer) Capabilities() consumer.Capabilities { return consumer.Capabilities{MutatesData: false} } -func (lc *MockLogConsumer) ConsumeLogs(_ context.Context, ld pdata.Logs) error { +func (lc *MockLogConsumer) ConsumeLogs(_ context.Context, ld plog.Logs) error { recordCount := ld.LogRecordCount() lc.numLogRecordsReceived.Add(uint64(recordCount)) lc.backend.ConsumeLogs(ld) diff --git a/testbed/testbed/validator.go b/testbed/testbed/validator.go index a58188026c2a..c956b94d141e 100644 --- a/testbed/testbed/validator.go +++ b/testbed/testbed/validator.go @@ -22,7 +22,8 @@ import ( "time" "github.com/stretchr/testify/assert" - "go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" ) // TestCaseValidator defines the interface for validating and reporting test results. @@ -122,8 +123,8 @@ func (v *CorrectnessTestValidator) RecordResults(tc *TestCase) { }) } -func (v *CorrectnessTestValidator) assertSentRecdTracingDataEqual(tracesList []pdata.Traces) { - spansMap := make(map[string]pdata.Span) +func (v *CorrectnessTestValidator) assertSentRecdTracingDataEqual(tracesList []ptrace.Traces) { + spansMap := make(map[string]ptrace.Span) // TODO: Remove this hack, and add a way to retrieve all sent data. 
if val, ok := v.dataProvider.(*goldenDataProvider); ok { populateSpansMap(spansMap, val.tracesGenerated) @@ -145,7 +146,7 @@ func (v *CorrectnessTestValidator) assertSentRecdTracingDataEqual(tracesList []p } } -func (v *CorrectnessTestValidator) diffSpan(sentSpan pdata.Span, recdSpan pdata.Span) { +func (v *CorrectnessTestValidator) diffSpan(sentSpan ptrace.Span, recdSpan ptrace.Span) { v.diffSpanTraceID(sentSpan, recdSpan) v.diffSpanSpanID(sentSpan, recdSpan) v.diffSpanTraceState(sentSpan, recdSpan) @@ -159,7 +160,7 @@ func (v *CorrectnessTestValidator) diffSpan(sentSpan pdata.Span, recdSpan pdata. v.diffSpanStatus(sentSpan, recdSpan) } -func (v *CorrectnessTestValidator) diffSpanTraceID(sentSpan pdata.Span, recdSpan pdata.Span) { +func (v *CorrectnessTestValidator) diffSpanTraceID(sentSpan ptrace.Span, recdSpan ptrace.Span) { if sentSpan.TraceID().HexString() != recdSpan.TraceID().HexString() { af := &TraceAssertionFailure{ typeName: "Span", @@ -172,7 +173,7 @@ func (v *CorrectnessTestValidator) diffSpanTraceID(sentSpan pdata.Span, recdSpan } } -func (v *CorrectnessTestValidator) diffSpanSpanID(sentSpan pdata.Span, recdSpan pdata.Span) { +func (v *CorrectnessTestValidator) diffSpanSpanID(sentSpan ptrace.Span, recdSpan ptrace.Span) { if sentSpan.SpanID().HexString() != recdSpan.SpanID().HexString() { af := &TraceAssertionFailure{ typeName: "Span", @@ -185,7 +186,7 @@ func (v *CorrectnessTestValidator) diffSpanSpanID(sentSpan pdata.Span, recdSpan } } -func (v *CorrectnessTestValidator) diffSpanTraceState(sentSpan pdata.Span, recdSpan pdata.Span) { +func (v *CorrectnessTestValidator) diffSpanTraceState(sentSpan ptrace.Span, recdSpan ptrace.Span) { if sentSpan.TraceState() != recdSpan.TraceState() { af := &TraceAssertionFailure{ typeName: "Span", @@ -198,7 +199,7 @@ func (v *CorrectnessTestValidator) diffSpanTraceState(sentSpan pdata.Span, recdS } } -func (v *CorrectnessTestValidator) diffSpanParentSpanID(sentSpan pdata.Span, recdSpan pdata.Span) { +func (v *CorrectnessTestValidator) diffSpanParentSpanID(sentSpan ptrace.Span, recdSpan ptrace.Span) { if sentSpan.ParentSpanID().HexString() != recdSpan.ParentSpanID().HexString() { af := &TraceAssertionFailure{ typeName: "Span", @@ -211,7 +212,7 @@ func (v *CorrectnessTestValidator) diffSpanParentSpanID(sentSpan pdata.Span, rec } } -func (v *CorrectnessTestValidator) diffSpanName(sentSpan pdata.Span, recdSpan pdata.Span) { +func (v *CorrectnessTestValidator) diffSpanName(sentSpan ptrace.Span, recdSpan ptrace.Span) { // Because of https://github.com/openzipkin/zipkin-go/pull/166 compare lower cases. 
if !strings.EqualFold(sentSpan.Name(), recdSpan.Name()) { af := &TraceAssertionFailure{ @@ -225,7 +226,7 @@ func (v *CorrectnessTestValidator) diffSpanName(sentSpan pdata.Span, recdSpan pd } } -func (v *CorrectnessTestValidator) diffSpanKind(sentSpan pdata.Span, recdSpan pdata.Span) { +func (v *CorrectnessTestValidator) diffSpanKind(sentSpan ptrace.Span, recdSpan ptrace.Span) { if sentSpan.Kind() != recdSpan.Kind() { af := &TraceAssertionFailure{ typeName: "Span", @@ -238,7 +239,7 @@ func (v *CorrectnessTestValidator) diffSpanKind(sentSpan pdata.Span, recdSpan pd } } -func (v *CorrectnessTestValidator) diffSpanTimestamps(sentSpan pdata.Span, recdSpan pdata.Span) { +func (v *CorrectnessTestValidator) diffSpanTimestamps(sentSpan ptrace.Span, recdSpan ptrace.Span) { if notWithinOneMillisecond(sentSpan.StartTimestamp(), recdSpan.StartTimestamp()) { af := &TraceAssertionFailure{ typeName: "Span", @@ -261,7 +262,7 @@ func (v *CorrectnessTestValidator) diffSpanTimestamps(sentSpan pdata.Span, recdS } } -func (v *CorrectnessTestValidator) diffSpanAttributes(sentSpan pdata.Span, recdSpan pdata.Span) { +func (v *CorrectnessTestValidator) diffSpanAttributes(sentSpan ptrace.Span, recdSpan ptrace.Span) { if sentSpan.Attributes().Len() != recdSpan.Attributes().Len() { af := &TraceAssertionFailure{ typeName: "Span", @@ -286,7 +287,7 @@ func (v *CorrectnessTestValidator) diffSpanAttributes(sentSpan pdata.Span, recdS } } -func (v *CorrectnessTestValidator) diffSpanEvents(sentSpan pdata.Span, recdSpan pdata.Span) { +func (v *CorrectnessTestValidator) diffSpanEvents(sentSpan ptrace.Span, recdSpan ptrace.Span) { if sentSpan.Events().Len() != recdSpan.Events().Len() { af := &TraceAssertionFailure{ typeName: "Span", @@ -344,7 +345,7 @@ func (v *CorrectnessTestValidator) diffSpanEvents(sentSpan pdata.Span, recdSpan } } -func (v *CorrectnessTestValidator) diffSpanLinks(sentSpan pdata.Span, recdSpan pdata.Span) { +func (v *CorrectnessTestValidator) diffSpanLinks(sentSpan ptrace.Span, recdSpan ptrace.Span) { if sentSpan.Links().Len() != recdSpan.Links().Len() { af := &TraceAssertionFailure{ typeName: "Span", @@ -401,7 +402,7 @@ func (v *CorrectnessTestValidator) diffSpanLinks(sentSpan pdata.Span, recdSpan p } } -func (v *CorrectnessTestValidator) diffSpanStatus(sentSpan pdata.Span, recdSpan pdata.Span) { +func (v *CorrectnessTestValidator) diffSpanStatus(sentSpan ptrace.Span, recdSpan ptrace.Span) { if sentSpan.Status().Code() != recdSpan.Status().Code() { af := &TraceAssertionFailure{ typeName: "Span", @@ -415,8 +416,8 @@ func (v *CorrectnessTestValidator) diffSpanStatus(sentSpan pdata.Span, recdSpan } func (v *CorrectnessTestValidator) diffAttributeMap(spanName string, - sentAttrs pdata.Map, recdAttrs pdata.Map, fmtStr string) { - sentAttrs.Range(func(sentKey string, sentVal pdata.Value) bool { + sentAttrs pcommon.Map, recdAttrs pcommon.Map, fmtStr string) { + sentAttrs.Range(func(sentKey string, sentVal pcommon.Value) bool { recdVal, ok := recdAttrs.Get(sentKey) if !ok { af := &TraceAssertionFailure{ @@ -430,7 +431,7 @@ func (v *CorrectnessTestValidator) diffAttributeMap(spanName string, return true } switch sentVal.Type() { - case pdata.ValueTypeMap: + case pcommon.ValueTypeMap: v.compareKeyValueList(spanName, sentVal, recdVal, fmtStr, sentKey) default: v.compareSimpleValues(spanName, sentVal, recdVal, fmtStr, sentKey) @@ -439,7 +440,7 @@ func (v *CorrectnessTestValidator) diffAttributeMap(spanName string, }) } -func (v *CorrectnessTestValidator) compareSimpleValues(spanName string, sentVal pdata.Value, recdVal 
pdata.Value, +func (v *CorrectnessTestValidator) compareSimpleValues(spanName string, sentVal pcommon.Value, recdVal pcommon.Value, fmtStr string, attrKey string) { if !sentVal.Equal(recdVal) { sentStr := sentVal.AsString() @@ -458,11 +459,11 @@ func (v *CorrectnessTestValidator) compareSimpleValues(spanName string, sentVal } func (v *CorrectnessTestValidator) compareKeyValueList( - spanName string, sentVal pdata.Value, recdVal pdata.Value, fmtStr string, attrKey string) { + spanName string, sentVal pcommon.Value, recdVal pcommon.Value, fmtStr string, attrKey string) { switch recdVal.Type() { - case pdata.ValueTypeMap: + case pcommon.ValueTypeMap: v.diffAttributeMap(spanName, sentVal.MapVal(), recdVal.MapVal(), fmtStr) - case pdata.ValueTypeString: + case pcommon.ValueTypeString: v.compareSimpleValues(spanName, sentVal, recdVal, fmtStr, attrKey) default: af := &TraceAssertionFailure{ @@ -476,8 +477,8 @@ func (v *CorrectnessTestValidator) compareKeyValueList( } } -func convertEventsSliceToMap(events pdata.SpanEventSlice) map[string][]pdata.SpanEvent { - eventMap := make(map[string][]pdata.SpanEvent) +func convertEventsSliceToMap(events ptrace.SpanEventSlice) map[string][]ptrace.SpanEvent { + eventMap := make(map[string][]ptrace.SpanEvent) for i := 0; i < events.Len(); i++ { event := events.At(i) eventMap[event.Name()] = append(eventMap[event.Name()], event) @@ -488,12 +489,12 @@ func convertEventsSliceToMap(events pdata.SpanEventSlice) map[string][]pdata.Spa return eventMap } -func sortEventsByTimestamp(eventList []pdata.SpanEvent) { +func sortEventsByTimestamp(eventList []ptrace.SpanEvent) { sort.SliceStable(eventList, func(i, j int) bool { return eventList[i].Timestamp() < eventList[j].Timestamp() }) } -func convertLinksSliceToMap(links pdata.SpanLinkSlice) map[string]pdata.SpanLink { - linkMap := make(map[string]pdata.SpanLink) +func convertLinksSliceToMap(links ptrace.SpanLinkSlice) map[string]ptrace.SpanLink { + linkMap := make(map[string]ptrace.SpanLink) for i := 0; i < links.Len(); i++ { link := links.At(i) linkMap[traceIDAndSpanIDToString(link.TraceID(), link.SpanID())] = link @@ -501,8 +502,8 @@ func convertLinksSliceToMap(links pdata.SpanLinkSlice) map[string]pdata.SpanLink return linkMap } -func notWithinOneMillisecond(sentNs pdata.Timestamp, recdNs pdata.Timestamp) bool { - var diff pdata.Timestamp +func notWithinOneMillisecond(sentNs pcommon.Timestamp, recdNs pcommon.Timestamp) bool { + var diff pcommon.Timestamp if sentNs > recdNs { diff = sentNs - recdNs } else { @@ -511,7 +512,7 @@ func notWithinOneMillisecond(sentNs pdata.Timestamp, recdNs pdata.Timestamp) boo return diff > 1100000 } -func populateSpansMap(spansMap map[string]pdata.Span, tds []pdata.Traces) { +func populateSpansMap(spansMap map[string]ptrace.Span, tds []ptrace.Traces) { for _, td := range tds { rss := td.ResourceSpans() for i := 0; i < rss.Len(); i++ { @@ -528,6 +529,6 @@ func populateSpansMap(spansMap map[string]pdata.Span, tds []pdata.Traces) { } } -func traceIDAndSpanIDToString(traceID pdata.TraceID, spanID pdata.SpanID) string { +func traceIDAndSpanIDToString(traceID pcommon.TraceID, spanID pcommon.SpanID) string { return fmt.Sprintf("%s-%s", traceID.HexString(), spanID.HexString()) } diff --git a/testbed/tests/resource_processor_test.go b/testbed/tests/resource_processor_test.go index b531d1544d61..c03c1cb125cd 100644 --- a/testbed/tests/resource_processor_test.go +++ b/testbed/tests/resource_processor_test.go @@ -20,14 +20,14 @@ import ( "testing" "github.com/stretchr/testify/require" - 
"go.opentelemetry.io/collector/model/pdata" + "go.opentelemetry.io/collector/pdata/pmetric" "github.com/open-telemetry/opentelemetry-collector-contrib/testbed/testbed" ) var ( - mockedConsumedResourceWithType = func() pdata.Metrics { - md := pdata.NewMetrics() + mockedConsumedResourceWithType = func() pmetric.Metrics { + md := pmetric.NewMetrics() rm := md.ResourceMetrics().AppendEmpty() rm.Resource().Attributes().UpsertString("opencensus.resourcetype", "host") rm.Resource().Attributes().UpsertString("label-key", "label-value") @@ -35,19 +35,19 @@ var ( m.SetName("metric-name") m.SetDescription("metric-description") m.SetUnit("metric-unit") - m.SetDataType(pdata.MetricDataTypeGauge) + m.SetDataType(pmetric.MetricDataTypeGauge) m.Gauge().DataPoints().AppendEmpty().SetIntVal(0) return md }() - mockedConsumedResourceEmpty = func() pdata.Metrics { - md := pdata.NewMetrics() + mockedConsumedResourceEmpty = func() pmetric.Metrics { + md := pmetric.NewMetrics() rm := md.ResourceMetrics().AppendEmpty() m := rm.ScopeMetrics().AppendEmpty().Metrics().AppendEmpty() m.SetName("metric-name") m.SetDescription("metric-description") m.SetUnit("metric-unit") - m.SetDataType(pdata.MetricDataTypeGauge) + m.SetDataType(pmetric.MetricDataTypeGauge) m.Gauge().DataPoints().AppendEmpty().SetIntVal(0) return md }() @@ -56,8 +56,8 @@ var ( type resourceProcessorTestCase struct { name string resourceProcessorConfig string - mockedConsumedMetrics pdata.Metrics - expectedMetrics pdata.Metrics + mockedConsumedMetrics pmetric.Metrics + expectedMetrics pmetric.Metrics } func getResourceProcessorTestCases() []resourceProcessorTestCase { @@ -78,8 +78,8 @@ func getResourceProcessorTestCases() []resourceProcessorTestCase { action: delete `, mockedConsumedMetrics: mockedConsumedResourceWithType, - expectedMetrics: func() pdata.Metrics { - md := pdata.NewMetrics() + expectedMetrics: func() pmetric.Metrics { + md := pmetric.NewMetrics() rm := md.ResourceMetrics().AppendEmpty() rm.Resource().Attributes().UpsertString("resource-type", "host") rm.Resource().Attributes().UpsertString("label-key", "new-label-value") @@ -97,8 +97,8 @@ func getResourceProcessorTestCases() []resourceProcessorTestCase { `, mockedConsumedMetrics: mockedConsumedResourceEmpty, - expectedMetrics: func() pdata.Metrics { - md := pdata.NewMetrics() + expectedMetrics: func() pmetric.Metrics { + md := pmetric.NewMetrics() rm := md.ResourceMetrics().AppendEmpty() rm.Resource().Attributes().UpsertString("additional-label-key", "additional-label-value") return md diff --git a/testbed/tests/trace_test.go b/testbed/tests/trace_test.go index dd7704cfc01e..16d49f3469e8 100644 --- a/testbed/tests/trace_test.go +++ b/testbed/tests/trace_test.go @@ -27,8 +27,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.opentelemetry.io/collector/model/pdata" conventions "go.opentelemetry.io/collector/model/semconv/v1.6.1" + "go.opentelemetry.io/collector/pdata/ptrace" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/idutils" "github.com/open-telemetry/opentelemetry-collector-contrib/testbed/datareceivers" @@ -335,7 +335,7 @@ func verifySingleSpan( tc *testbed.TestCase, serviceName string, spanName string, - verifyReceived func(span pdata.Span), + verifyReceived func(span ptrace.Span), ) { // Clear previously received traces. @@ -343,7 +343,7 @@ func verifySingleSpan( startCounter := tc.MockBackend.DataItemsReceived() // Send one span. 
- td := pdata.NewTraces() + td := ptrace.NewTraces() rs := td.ResourceSpans().AppendEmpty() rs.Resource().Attributes().InsertString(conventions.AttributeServiceName, serviceName) span := rs.ScopeSpans().AppendEmpty().Spans().AppendEmpty() @@ -454,7 +454,7 @@ func TestTraceAttributesProcessor(t *testing.T) { nodeToInclude := "service-to-add-attr" // verifySpan verifies that attributes was added to the internal data span. - verifySpan := func(span pdata.Span) { + verifySpan := func(span ptrace.Span) { require.NotNil(t, span) require.Equal(t, span.Attributes().Len(), 1) attrVal, ok := span.Attributes().Get("new_attr") @@ -467,14 +467,14 @@ func TestTraceAttributesProcessor(t *testing.T) { // Create a service name that does not match "include" filter. nodeToExclude := "service-not-to-add-attr" - verifySingleSpan(t, tc, nodeToExclude, spanToInclude, func(span pdata.Span) { + verifySingleSpan(t, tc, nodeToExclude, spanToInclude, func(span ptrace.Span) { // Verify attributes was not added to the new internal data span. assert.Equal(t, span.Attributes().Len(), 0) }) // Create another span that does not match "include" filter. spanToExclude := "span-not-to-add-attr" - verifySingleSpan(t, tc, nodeToInclude, spanToExclude, func(span pdata.Span) { + verifySingleSpan(t, tc, nodeToInclude, spanToExclude, func(span ptrace.Span) { // Verify attributes was not added to the new internal data span. assert.Equal(t, span.Attributes().Len(), 0) }) diff --git a/tracegen/go.mod b/tracegen/go.mod index 1f77953c6802..3608122f7299 100644 --- a/tracegen/go.mod +++ b/tracegen/go.mod @@ -35,3 +35,5 @@ require ( google.golang.org/protobuf v1.28.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect ) + +replace go.opentelemetry.io/collector/pdata => go.opentelemetry.io/collector/pdata v0.0.0-20220412005140-8eb68f40028d
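For readers applying the same migration in other components, the sketch below condenses the API change this diff makes everywhere: imports of the single go.opentelemetry.io/collector/model/pdata package are replaced by the signal-specific go.opentelemetry.io/collector/pdata/ptrace, pdata/pmetric, pdata/plog and pdata/pcommon packages, while the builder methods themselves (AppendEmpty, UpsertString, SetDataType, NewTimestampFromTime, and so on) keep their existing names. This is a minimal standalone sketch, not code from the PR; the helper names buildTestTraces, buildTestMetrics and buildTestLogs are illustrative, and it only uses calls that already appear in the hunks above. A module consuming it at this point in time would also need the replace directive pinning go.opentelemetry.io/collector/pdata to the 20220412005140-8eb68f40028d pseudo-version, as added at the bottom of each go.mod in this change.

package main

import (
	"fmt"
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/plog"
	"go.opentelemetry.io/collector/pdata/pmetric"
	"go.opentelemetry.io/collector/pdata/ptrace"
)

// buildTestTraces mirrors the ptrace/pcommon calls the perfTestDataProvider uses above.
func buildTestTraces() ptrace.Traces {
	td := ptrace.NewTraces()
	span := td.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty().Spans().AppendEmpty()
	span.SetName("load-generator-span")
	span.SetKind(ptrace.SpanKindClient)
	span.Attributes().UpsertString("load_generator.label", "value")
	now := time.Now()
	span.SetStartTimestamp(pcommon.NewTimestampFromTime(now))
	span.SetEndTimestamp(pcommon.NewTimestampFromTime(now.Add(time.Millisecond)))
	return td
}

// buildTestMetrics mirrors the pmetric gauge construction used above.
func buildTestMetrics() pmetric.Metrics {
	md := pmetric.NewMetrics()
	rm := md.ResourceMetrics().AppendEmpty()
	rm.Resource().Attributes().UpsertString("label-key", "label-value")
	m := rm.ScopeMetrics().AppendEmpty().Metrics().AppendEmpty()
	m.SetName("load_generator_0")
	m.SetUnit("1")
	m.SetDataType(pmetric.MetricDataTypeGauge)
	dp := m.Gauge().DataPoints().AppendEmpty()
	dp.SetStartTimestamp(pcommon.NewTimestampFromTime(time.Now()))
	dp.SetIntVal(1)
	return md
}

// buildTestLogs mirrors the plog log-record construction used above.
func buildTestLogs() plog.Logs {
	ld := plog.NewLogs()
	rec := ld.ResourceLogs().AppendEmpty().ScopeLogs().AppendEmpty().LogRecords().AppendEmpty()
	rec.SetSeverityNumber(plog.SeverityNumberINFO3)
	rec.SetSeverityText("INFO3")
	rec.Body().SetStringVal("Load Generator Counter #0")
	return ld
}

func main() {
	// Count helpers are unchanged by the split: SpanCount, DataPointCount, LogRecordCount.
	fmt.Println(buildTestTraces().SpanCount(), buildTestMetrics().DataPointCount(), buildTestLogs().LogRecordCount())
}

In short, the migration is mechanical: pick the package by signal (ptrace, pmetric, plog) or by shared type (pcommon for Value, Map, Timestamp, TraceID, SpanID), and leave the call sites otherwise untouched, which is exactly the pattern repeated across the testbed files in this diff.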