diff --git a/.golangci.yml b/.golangci.yml
index 206e53d24c..a9fd5f6e54 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -34,7 +34,8 @@ run:
# won't be reported. The default value is an empty list, but there is
# no need to include all autogenerated files; we confidently recognize
# autogenerated files. If that's not the case, please let us know.
- skip-files: []
+ skip-files:
+ - ".*_gen.go$"
# by default isn't set. If set, we pass it to "go list -mod={option}". From "go help modules":
# If invoked with -mod=readonly, the go command is disallowed from the implicit
@@ -113,7 +114,7 @@ linters-settings:
lll:
# max line length; longer lines will be reported. Default is 120.
# '\t' is counted as 1 character by default, and can be changed with the tab-width option
- line-length: 100
+ line-length: 120
# tab width in spaces. Defaults to 1.
tab-width: 1
unused:
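
Two settings change in this file: files matching the new skip-files pattern are excluded from lint reports, and the lll line-length limit is relaxed from 100 to 120 characters. Below is a minimal sketch of how the skip-files entry behaves, assuming golangci-lint's documented regular-expression matching; the file names are hypothetical. Note that the dots in ".*_gen.go$" are unescaped, so each "." matches any character, not only a literal dot.

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// The pattern added to skip-files above, compiled as a regular expression.
	skip := regexp.MustCompile(".*_gen.go$")

	for _, name := range []string{
		"list_gen.go",      // matched: excluded from lint reports
		"handler.go",       // not matched: still linted
		"list_gen_test.go", // not matched: "_gen" is not followed by one character and "go"
	} {
		fmt.Printf("%-20s skipped=%v\n", name, skip.MatchString(name))
	}
}
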
diff --git a/go.mod b/go.mod
index 6dd0152e39..c737a5f4de 100644
--- a/go.mod
+++ b/go.mod
@@ -6,7 +6,6 @@ require (
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect
github.com/CAFxX/gcnotifier v0.0.0-20190112062741-224a280d589d // indirect
github.com/DataDog/datadog-go v3.7.1+incompatible // indirect
- github.com/Masterminds/semver v1.5.0 // indirect
github.com/MichaelTJones/pcg v0.0.0-20180122055547-df440c6ed7ed
github.com/Microsoft/go-winio v0.4.14 // indirect
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect
@@ -23,7 +22,6 @@ require (
github.com/containerd/continuity v0.0.0-20200413184840-d3ef23f19fbb // indirect
github.com/davecgh/go-spew v1.1.1
github.com/docker/go-connections v0.4.0 // indirect
- github.com/fatih/color v1.10.0 // indirect
github.com/fortytw2/leaktest v1.2.1-0.20180901000122-b433bbd6d743
github.com/fossas/fossa-cli v1.0.30
github.com/garethr/kubeval v0.0.0-20180821130434-c44f5193dc94
@@ -36,6 +34,7 @@ require (
github.com/golang/mock v1.4.4
github.com/golang/protobuf v1.4.2
github.com/golang/snappy v0.0.1
+ github.com/golangci/golangci-lint v1.33.0
github.com/google/go-cmp v0.5.2
github.com/google/go-jsonnet v0.16.0
github.com/google/uuid v1.1.2-0.20190416172445-c2e93f3ae59f // indirect
@@ -47,7 +46,6 @@ require (
github.com/influxdata/influxdb v1.7.7
github.com/jhump/protoreflect v1.6.1
github.com/json-iterator/go v1.1.9
- github.com/kr/text v0.2.0 // indirect
github.com/leanovate/gopter v0.2.8
github.com/lib/pq v1.6.0 // indirect
github.com/lightstep/lightstep-tracer-go v0.18.1
@@ -66,9 +64,6 @@ require (
github.com/m3dbx/vellum v0.0.0-20201119082309-5b47f7a70f69
github.com/mauricelam/genny v0.0.0-20180903214747-eb2c5232c885
github.com/mjibson/esc v0.1.0
- github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
- github.com/onsi/ginkgo v1.14.1 // indirect
- github.com/onsi/gomega v1.10.2 // indirect
github.com/opencontainers/image-spec v1.0.1 // indirect
github.com/opencontainers/runc v0.1.1 // indirect
github.com/opentracing-contrib/go-stdlib v0.0.0-20190519235532-cf7a6c988dc9
@@ -92,7 +87,6 @@ require (
github.com/rveen/ogdl v0.0.0-20200522080342-eeeda1a978e7 // indirect
github.com/sergi/go-diff v1.1.0
github.com/shirou/gopsutil v2.20.5+incompatible // indirect
- github.com/sirupsen/logrus v1.7.0 // indirect
github.com/spf13/cast v1.3.1-0.20190531151931-f31dc0aaab5a // indirect
github.com/spf13/cobra v1.1.1
github.com/spf13/jwalterweatherman v1.1.0 // indirect
@@ -123,7 +117,6 @@ require (
golang.org/x/sys v0.0.0-20201009025420-dfb3f7c4e634
golang.org/x/tools v0.0.0-20201013201025-64a9e34f3752
google.golang.org/grpc v1.29.1
- gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect
gopkg.in/go-ini/ini.v1 v1.57.0 // indirect
gopkg.in/go-playground/assert.v1 v1.2.1 // indirect
gopkg.in/go-playground/validator.v9 v9.7.0
@@ -133,7 +126,6 @@ require (
gopkg.in/vmihailenco/msgpack.v2 v2.8.3
gopkg.in/yaml.v2 v2.3.0
gotest.tools v2.2.0+incompatible
- honnef.co/go/tools v0.0.1-2020.1.6 // indirect
)
// branch 0.9.3-pool-read-binary-3
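
The go.mod hunks above add golangci-lint v1.33.0 as a direct dependency and drop "// indirect" entries made redundant by the update. A build-time tool is commonly kept in go.mod via a build-tagged tools file; the sketch below shows that idiom under stated assumptions (the tools.go file name, the build tag, and the canonical github.com import path are not part of this diff):

// +build tools

// Package tools pins tool dependencies so "go mod tidy" retains them.
// Illustrative only; not a file shown in the change above.
package tools

import (
	_ "github.com/golangci/golangci-lint/cmd/golangci-lint"
)

With such a file in place, "go run github.com/golangci/golangci-lint/cmd/golangci-lint run" invokes the pinned version.
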
diff --git a/go.sum b/go.sum
index 9ea86511f2..7432ab12a1 100644
--- a/go.sum
+++ b/go.sum
@@ -1,3 +1,5 @@
+4d63.com/gochecknoglobals v0.0.0-20201008074935-acfc0b28355a h1:wFEQiK85fRsEVF0CRrPAos5LoAryUsIX1kPW/WrIqFw=
+4d63.com/gochecknoglobals v0.0.0-20201008074935-acfc0b28355a/go.mod h1:wfdC5ZjKSPr7CybKEcgJhUOgeAQW1+7WcyK8OvUilfo=
bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
@@ -37,6 +39,8 @@ github.com/CAFxX/gcnotifier v0.0.0-20190112062741-224a280d589d/go.mod h1:Rn2zM2M
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/DataDog/datadog-go v3.7.1+incompatible h1:HmA9qHVrHIAqpSvoCYJ+c6qst0lgqEhNW6/KwfkHbS8=
github.com/DataDog/datadog-go v3.7.1+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
+github.com/Djarvur/go-err113 v0.0.0-20200511133814-5174e21577d5 h1:XTrzB+F8+SpRmbhAH8HLxhiiG6nYNwaBZjrFps1oWEk=
+github.com/Djarvur/go-err113 v0.0.0-20200511133814-5174e21577d5/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs=
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
@@ -51,6 +55,8 @@ github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/OneOfOne/xxhash v1.2.8 h1:31czK/TI9sNkxIKfaUfGlU47BAxQ0ztGgd9vPyqimf8=
github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
+github.com/OpenPeeDeeP/depguard v1.0.1 h1:VlW4R6jmBIv3/u1JNlawEvJMM4J+dPORPaZasQee8Us=
+github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM=
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
@@ -74,6 +80,7 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRF
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/alexbrainman/sspi v0.0.0-20180613141037-e580b900e9f5 h1:P5U+E4x5OkVEKQDklVPmzs71WM56RTTRqV4OrDC//Y4=
github.com/alexbrainman/sspi v0.0.0-20180613141037-e580b900e9f5/go.mod h1:976q2ETgjT2snVCf2ZaBnyBbVoPERGjUz+0sofzEfro=
+github.com/andybalholm/brotli v1.0.0/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q=
@@ -112,6 +119,8 @@ github.com/bmatcuk/doublestar v1.3.1 h1:rT8rxDPsavp9G+4ZULzqhhUSaI/OPsTZNG88Z3i0
github.com/bmatcuk/doublestar v1.3.1/go.mod h1:wiQtGV+rzVYxB7WIlirSN++5HPtPlXEo9MEoZQC/PmE=
github.com/bmizerany/perks v0.0.0-20141205001514-d9a9656a3a4b h1:AP/Y7sqYicnjGDfD5VcY4CIfh1hRXBUavxrvELjTiOE=
github.com/bmizerany/perks v0.0.0-20141205001514-d9a9656a3a4b/go.mod h1:ac9efd0D1fsDb3EJvhqgXRbFx7bs2wqZ10HQPeU8U/Q=
+github.com/bombsimon/wsl/v3 v3.1.0 h1:E5SRssoBgtVFPcYWUOFJEcgaySgdtTNYzsSKDOY7ss8=
+github.com/bombsimon/wsl/v3 v3.1.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2Pm1FNZIc=
github.com/briandowns/spinner v1.11.1 h1:OixPqDEcX3juo5AjQZAnFPbeUA0jvkp2qzB5gOZJ/L0=
github.com/briandowns/spinner v1.11.1/go.mod h1:QOuQk7x+EaDASo80FEXwlwiA+j/PPIcX3FScO+3/ZPQ=
github.com/c2h5oh/datasize v0.0.0-20171227191756-4eba002a5eae h1:2Zmk+8cNvAGuY8AyvZuWpUdpQUAXwfom4ReVMe/CTIo=
@@ -164,6 +173,8 @@ github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/daixiang0/gci v0.2.4 h1:BUCKk5nlK2m+kRIsoj+wb/5hazHvHeZieBKWd9Afa8Q=
+github.com/daixiang0/gci v0.2.4/go.mod h1:+AV8KmHTGxxwp/pY84TLQfFKp2vuKXXJVzF3kD/hfR4=
github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -173,6 +184,8 @@ github.com/daviddengcn/go-assert v0.0.0-20150305222929-ba7e68aeeff6 h1:OPIYL/VhQ
github.com/daviddengcn/go-assert v0.0.0-20150305222929-ba7e68aeeff6/go.mod h1:N+OekMaElW3rSAfDdNX6Dff3HS237/OhC08jYFW4oCw=
github.com/daviddengcn/go-villa v0.0.0-20160111144444-3f35da8ba982 h1:2Trx4ntMtxmus9nN2w1PIqJOI8jB3RjlnDnFm/ImlIU=
github.com/daviddengcn/go-villa v0.0.0-20160111144444-3f35da8ba982/go.mod h1:U8xNoHcXfPnZzy9zCxeKRjaJgC1d3613rFHjZVVAqKc=
+github.com/denis-tingajkin/go-header v0.3.1 h1:ymEpSiFjeItCy1FOP+x0M2KdCELdEAHUsNa8F+hHc6w=
+github.com/denis-tingajkin/go-header v0.3.1/go.mod h1:sq/2IxMhaZX+RRcgHfCRx/m0M5na0fBt4/CRe7Lrji0=
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
@@ -234,6 +247,8 @@ github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31 h1:gclg6gY70GLy
github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
github.com/gnewton/jargo v0.0.0-20150417131352-41f5f186a805 h1:rLZXvVgFIon3lI+v9IL8t1AmG9/yLMSRB5LQ0frn+6Q=
github.com/gnewton/jargo v0.0.0-20150417131352-41f5f186a805/go.mod h1:x+HLDnZexLq1FmhrdgFf4c3EWGbqhU3ITvISBFyzvRo=
+github.com/go-critic/go-critic v0.5.2 h1:3RJdgf6u4NZUumoP8nzbqiiNT8e1tC2Oc7jlgqre/IA=
+github.com/go-critic/go-critic v0.5.2/go.mod h1:cc0+HvdE3lFpqLecgqMaJcvWWH77sLdBp+wLGPM1Yyo=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
@@ -293,6 +308,30 @@ github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/go-toolsmith/astcast v1.0.0 h1:JojxlmI6STnFVG9yOImLeGREv8W2ocNUM+iOhR6jE7g=
+github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4=
+github.com/go-toolsmith/astcopy v1.0.0 h1:OMgl1b1MEpjFQ1m5ztEO06rz5CUd3oBv9RF7+DyvdG8=
+github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ=
+github.com/go-toolsmith/astequal v1.0.0 h1:4zxD8j3JRFNyLN46lodQuqz3xdKSrur7U/sr0SDS/gQ=
+github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY=
+github.com/go-toolsmith/astfmt v1.0.0 h1:A0vDDXt+vsvLEdbMFJAUBI/uTbRw1ffOPnxsILnFL6k=
+github.com/go-toolsmith/astfmt v1.0.0/go.mod h1:cnWmsOAuq4jJY6Ct5YWlVLmcmLMn1JUPuQIHCY7CJDw=
+github.com/go-toolsmith/astinfo v0.0.0-20180906194353-9809ff7efb21/go.mod h1:dDStQCHtmZpYOmjRP/8gHHnCCch3Zz3oEgCdZVdtweU=
+github.com/go-toolsmith/astp v1.0.0 h1:alXE75TXgcmupDsMK1fRAy0YUzLzqPVvBKoyWV+KPXg=
+github.com/go-toolsmith/astp v1.0.0/go.mod h1:RSyrtpVlfTFGDYRbrjyWP1pYu//tSFcvdYrA8meBmLI=
+github.com/go-toolsmith/pkgload v1.0.0 h1:4DFWWMXVfbcN5So1sBNW9+yeiMqLFGl1wFLTL5R0Tgg=
+github.com/go-toolsmith/pkgload v1.0.0/go.mod h1:5eFArkbO80v7Z0kdngIxsRXRMTaX4Ilcwuh3clNrQJc=
+github.com/go-toolsmith/strparse v1.0.0 h1:Vcw78DnpCAKlM20kSbAyO4mPfJn/lyYA4BJUDxe2Jb4=
+github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8=
+github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU=
+github.com/go-toolsmith/typep v1.0.2 h1:8xdsa1+FSIH/RhEkgnD1j2CJOy5mNllW1Q9tRiYwvlk=
+github.com/go-toolsmith/typep v1.0.2/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU=
+github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b h1:khEcpUM4yFcxg4/FHQWkvVRmgijNXRfzkIDHh23ggEo=
+github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM=
+github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
+github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
+github.com/gofrs/flock v0.8.0 h1:MSdYClljsF3PbENUUEx85nkWfJSGfzYI9yEBZOJz6CY=
+github.com/gofrs/flock v0.8.0/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
@@ -328,6 +367,34 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 h1:23T5iq8rbUYlhpt5DB4XJkc6BU31uODLD1o1gKvZmD0=
+github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4=
+github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a h1:w8hkcTqaFpzKqonE9uMCefW1WDie15eSP/4MssdenaM=
+github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk=
+github.com/golangci/errcheck v0.0.0-20181223084120-ef45e06d44b6 h1:YYWNAGTKWhKpcLLt7aSj/odlKrSrelQwlovBpDuf19w=
+github.com/golangci/errcheck v0.0.0-20181223084120-ef45e06d44b6/go.mod h1:DbHgvLiFKX1Sh2T1w8Q/h4NAI8MHIpzCdnBUDTXU3I0=
+github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613 h1:9kfjN3AdxcbsZBf8NjltjWihK2QfBBBZuv91cMFfDHw=
+github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8=
+github.com/golangci/gocyclo v0.0.0-20180528144436-0a533e8fa43d h1:pXTK/gkVNs7Zyy7WKgLXmpQ5bHTrq5GDsp8R9Qs67g0=
+github.com/golangci/gocyclo v0.0.0-20180528144436-0a533e8fa43d/go.mod h1:ozx7R9SIwqmqf5pRP90DhR2Oay2UIjGuKheCBCNwAYU=
+github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a h1:iR3fYXUjHCR97qWS8ch1y9zPNsgXThGwjKPrYfqMPks=
+github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU=
+github.com/golangci/golangci-lint v1.33.0 h1:/o4OtOR3Idim4FHKBJXcy+6ZjNDm82gwK/v6+gWyH9U=
+github.com/golangci/golangci-lint v1.33.0/go.mod h1:zMnMLSCaDlrXExYsuq2LOweE9CHVqYk5jexk23UsjYM=
+github.com/golangci/ineffassign v0.0.0-20190609212857-42439a7714cc h1:gLLhTLMk2/SutryVJ6D4VZCU3CUqr8YloG7FPIBWFpI=
+github.com/golangci/ineffassign v0.0.0-20190609212857-42439a7714cc/go.mod h1:e5tpTHCfVze+7EpLEozzMB3eafxo2KT5veNg1k6byQU=
+github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 h1:MfyDlzVjl1hoaPzPD4Gpb/QgoRfSBR0jdhwGyAWwMSA=
+github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg=
+github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca h1:kNY3/svz5T29MYHubXix4aDDuE3RWHkPvopM/EDv/MA=
+github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o=
+github.com/golangci/misspell v0.0.0-20180809174111-950f5d19e770 h1:EL/O5HGrF7Jaq0yNhBLucz9hTuRzj2LdwGBOaENgxIk=
+github.com/golangci/misspell v0.0.0-20180809174111-950f5d19e770/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA=
+github.com/golangci/prealloc v0.0.0-20180630174525-215b22d4de21 h1:leSNB7iYzLYSSx3J/s5sVf4Drkc68W2wm4Ixh/mr0us=
+github.com/golangci/prealloc v0.0.0-20180630174525-215b22d4de21/go.mod h1:tf5+bzsHdTM0bsB7+8mt0GUMvjCgwLpTapNZHU8AajI=
+github.com/golangci/revgrep v0.0.0-20180526074752-d9c87f5ffaf0 h1:HVfrLniijszjS1aiNg8JbBMO2+E1WIQ+j/gL4SQqGPg=
+github.com/golangci/revgrep v0.0.0-20180526074752-d9c87f5ffaf0/go.mod h1:qOQCunEYvmd/TLamH+7LlVccLvUH5kZNhbCgTHoBbp4=
+github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 h1:zwtduBRr5SSWhqsYNgcuWO2kFlpdOZbP0+yRjmvPGys=
+github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ=
github.com/golangplus/bytes v0.0.0-20160111154220-45c989fe5450 h1:7xqw01UYS+KCI25bMrPxwNYkSns2Db1ziQPpVq99FpE=
github.com/golangplus/bytes v0.0.0-20160111154220-45c989fe5450/go.mod h1:Bk6SMAONeMXrxql8uvOKuAZSu8aM5RUGv+1C6IJaEho=
github.com/golangplus/fmt v0.0.0-20150411045040-2a5d6d7d2995 h1:f5gsjBiF9tRRVomCvrkGMMWI8W1f2OBFar2c5oakAP0=
@@ -344,6 +411,7 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-github/v30 v30.1.0 h1:VLDx+UolQICEOKu2m4uAoMti1SxuEBAl7RSEG16L+Oo=
@@ -366,6 +434,7 @@ github.com/google/uuid v1.1.2-0.20190416172445-c2e93f3ae59f/go.mod h1:TIyPZe4Mgq
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
+github.com/gookit/color v1.3.1/go.mod h1:R3ogXq2B9rTbXoSHJ1HyUVAZ3poOJHpd9nQmyGZsfvQ=
github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
github.com/gophercloud/gophercloud v0.8.0/go.mod h1:Kc/QKr9thLKruO/dG0szY8kRIYS+iENz0ziI0hJf76A=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
@@ -385,6 +454,12 @@ github.com/gorilla/sessions v1.2.0/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/z
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE=
+github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE=
+github.com/gostaticanalysis/analysisutil v0.1.0 h1:E4c8Y1EQURbBEAHoXc/jBTK7Np14ArT8NPUiSFOl9yc=
+github.com/gostaticanalysis/analysisutil v0.1.0/go.mod h1:dMhHRU9KTiDcuLGdy87/2gTR8WruwYZrKdRq9m1O6uw=
+github.com/gostaticanalysis/comment v1.3.0 h1:wTVgynbFu8/nz6SGgywA0TcyIoAVsYc7ai/Zp5xNGlw=
+github.com/gostaticanalysis/comment v1.3.0/go.mod h1:xMicKDx7XRXYdVwY9f9wQpDJVnqWxw9wCauCMKp+IBI=
github.com/gotestyourself/gotestyourself v2.2.0+incompatible h1:AQwinXlbQR2HvPjQZOmDhRqsv5mZf+Jb1RnSLxcqZcI=
github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
@@ -480,9 +555,17 @@ github.com/jcmturner/gokrb5/v8 v8.2.0/go.mod h1:T1hnNppQsBtxW0tCHMHTkAt8n/sABdzZ
github.com/jcmturner/rpc/v2 v2.0.2 h1:gMB4IwRXYsWw4Bc6o/az2HJgFUA1ffSh90i26ZJ6Xl0=
github.com/jcmturner/rpc/v2 v2.0.2/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
+github.com/jgautheron/goconst v0.0.0-20201117150253-ccae5bf973f3 h1:7nkB9fLPMwtn/R6qfPcHileL/x9ydlhw8XyDrLI1ZXg=
+github.com/jgautheron/goconst v0.0.0-20201117150253-ccae5bf973f3/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4=
github.com/jhump/protoreflect v1.6.1 h1:4/2yi5LyDPP7nN+Hiird1SAJ6YoxUm13/oxHGRnbPd8=
github.com/jhump/protoreflect v1.6.1/go.mod h1:RZQ/lnuN+zqeRVpQigTwO6o0AJUkxbnSnpuG7toUTG4=
+github.com/jingyugao/rowserrcheck v0.0.0-20191204022205-72ab7603b68a h1:GmsqmapfzSJkm28dhRoHz2tLRbJmqhU86IPgBtN3mmk=
+github.com/jingyugao/rowserrcheck v0.0.0-20191204022205-72ab7603b68a/go.mod h1:xRskid8CManxVta/ALEhJha/pweKBaVG6fWgc0yH25s=
+github.com/jirfag/go-printf-func-name v0.0.0-20191110105641-45db9963cdd3 h1:jNYPNLe3d8smommaoQlK7LOA5ESyUJJ+Wf79ZtA7Vp4=
+github.com/jirfag/go-printf-func-name v0.0.0-20191110105641-45db9963cdd3/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
+github.com/jmoiron/sqlx v1.2.1-0.20190826204134-d7d95172beb5/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0=
@@ -503,8 +586,11 @@ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvW
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs=
@@ -516,15 +602,21 @@ github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/kunwardeep/paralleltest v1.0.2 h1:/jJRv0TiqPoEy/Y8dQxCFJhD56uS/pnvtatgTZBHokU=
+github.com/kunwardeep/paralleltest v1.0.2/go.mod h1:ZPqNm1fVHPllh5LPVujzbVz1JN2GhLxSfY+oqUsvG30=
github.com/kylelemons/godebug v0.0.0-20160406211939-eadb3ce320cb/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k=
+github.com/kyoh86/exportloopref v0.1.8 h1:5Ry/at+eFdkX9Vsdw3qU4YkvGtzuVfzT4X7S77LoN/M=
+github.com/kyoh86/exportloopref v0.1.8/go.mod h1:1tUcJeiioIs7VWe5gcOObrux3lb66+sBqGZrRkMwPgg=
github.com/leanovate/gopter v0.2.8 h1:eFPtJ3aa5zLfbxGROSNY75T9Dume60CWBAqoWQ3h/ig=
github.com/leanovate/gopter v0.2.8/go.mod h1:gNcbPWNEWRe4lm+bycKqxUYoH5uoVje5SkOJ3uoLer8=
+github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.6.0 h1:I5DPxhYJChW9KYc66se+oKFFQX6VuQrKiprsX6ivRZc=
github.com/lib/pq v1.6.0/go.mod h1:4vXEAYvW1fRQ2/FhZ78H73A60MHw1geSm145z2mdY1g=
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743 h1:143Bb8f8DuGWck/xpNUOckBVYfFbBTnLevfRZ1aVVqo=
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
github.com/lightstep/lightstep-tracer-go v0.18.1 h1:vi1F1IQ8N7hNWytK9DpJsUfQhGuNSc19z330K6vl4zk=
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
+github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4=
github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
github.com/m3db/bitset v2.0.0+incompatible h1:wMgri1Z2QSwJ8K/7ZuV7vE4feLOT7EofVC8RakIOybI=
github.com/m3db/bitset v2.0.0+incompatible/go.mod h1:X8CCqZmZxs2O6d4qHhiqtAKCin4G5mScPhiwX9rsc5c=
@@ -567,6 +659,10 @@ github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN
github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/maratori/testpackage v1.0.1 h1:QtJ5ZjqapShm0w5DosRjg0PRlSdAdlx+W6cCKoALdbQ=
+github.com/maratori/testpackage v1.0.1/go.mod h1:ddKdw+XG0Phzhx8BFDTKgpWP4i7MpApTE5fXSKAqwDU=
+github.com/matoous/godox v0.0.0-20190911065817-5d6d842e92eb h1:RHba4YImhrUVQDHUCe2BNSOz4tVy2yGyXhvYDvxGgeE=
+github.com/matoous/godox v0.0.0-20190911065817-5d6d842e92eb/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
@@ -584,10 +680,14 @@ github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHX
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-runewidth v0.0.2 h1:UnlwIPBGaTZfPQ6T1IGzPI0EkYAQmT9fAEJ/poFC63o=
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
+github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mauricelam/genny v0.0.0-20180903214747-eb2c5232c885 h1:nCU/HIvsORu8nlebFTTkEpxao5zA/yt5Y4yQccm34bM=
github.com/mauricelam/genny v0.0.0-20180903214747-eb2c5232c885/go.mod h1:wRyVMWiOZeVj+MieWS5tIBBtJ3RtqqMbPsA5Z+t5b5U=
+github.com/mbilski/exhaustivestruct v1.1.0 h1:4ykwscnAFeHJruT+EY3M3vdeP8uXMh0VV2E61iR7XD8=
+github.com/mbilski/exhaustivestruct v1.1.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc=
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
@@ -597,6 +697,7 @@ github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceT
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg=
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4=
github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
@@ -616,12 +717,17 @@ github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lN
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/moricho/tparallel v0.2.1 h1:95FytivzT6rYzdJLdtfn6m1bfFJylOJK41+lgv/EHf4=
+github.com/moricho/tparallel v0.2.1/go.mod h1:fXEIZxG2vdfl0ZF8b42f5a78EhjjD5mX8qUplsoSU4k=
+github.com/mozilla/tls-observatory v0.0.0-20200317151703-4fa42e1c2dee/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk=
github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae h1:VeRdUYdCw49yizlSbMEn2SZ+gT+3IUKx8BqxyQdz+BY=
github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
+github.com/nakabonne/nestif v0.3.0 h1:+yOViDGhg8ygGrmII72nV9B/zGxY188TYpfolntsaPw=
+github.com/nakabonne/nestif v0.3.0/go.mod h1:dI314BppzXjJ4HsCnbo7XzrJHPszZsjnk5wEBSYHI2c=
github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU=
github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k=
@@ -629,8 +735,12 @@ github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzE
github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
+github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d h1:AREM5mwr4u1ORQBMvzfzBgpsctsbQikCVpvC+tX285E=
+github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
+github.com/nishanths/exhaustive v0.1.0 h1:kVlMw8h2LHPMGUVqUj6230oQjjTMFjwcZrnkhXzFfl8=
+github.com/nishanths/exhaustive v0.1.0/go.mod h1:S1j9110vxV1ECdCudXRkeMnFQ/DQk9ajLT0Uf2MYZQQ=
github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=
@@ -696,6 +806,8 @@ github.com/pelletier/go-toml v1.5.0 h1:5BakdOZdtKJ1FFk6QdL8iSGrMWsXgchNJcrnarjbm
github.com/pelletier/go-toml v1.5.0/go.mod h1:5N711Q9dKgbdkxHL+MEfF31hpT7l0S0s/t2kKREewys=
github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
+github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d h1:CdDQnGF8Nq9ocOS/xlSptM1N3BbrA6/kmaep5ggwaIA=
+github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw=
github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ=
github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
@@ -716,6 +828,8 @@ github.com/pointlander/jetset v1.0.0 h1:bNlaNAX7cDPID9SlcogmXlDWq0KcRJSpKwHXaAM3
github.com/pointlander/jetset v1.0.0/go.mod h1:zY6+WHRPB10uzTajloHtybSicLW1bf6Rz0eSaU9Deng=
github.com/pointlander/peg v1.0.0 h1:rtCtA6Fu6xJpILX8WJfU+cvrcKmXgTfG/v+bkLP8NYY=
github.com/pointlander/peg v1.0.0/go.mod h1:WJTMcgeWYr6fZz4CwHnY1oWZCXew8GWCF93FaAxPrh4=
+github.com/polyfloyd/go-errorlint v0.0.0-20201006195004-351e25ade6e3 h1:Amgs0nbayPhBNGh1qPqqr2e7B2qNAcBgRjnBH/lmn8k=
+github.com/polyfloyd/go-errorlint v0.0.0-20201006195004-351e25ade6e3/go.mod h1:wi9BfjxjF/bwiZ701TzmfKu6UKC357IOAtNr0Td0Lvw=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/prashantv/protectmem v0.0.0-20171002184600-e20412882b3a h1:AA9vgIBDjMHPC2McaGPojgV2dcI78ZC0TLNhYCXEKH8=
github.com/prashantv/protectmem v0.0.0-20171002184600-e20412882b3a/go.mod h1:lzZQ3Noex5pfAy7mkAeCjcBDteYU85uWWnJ/y6gKU8k=
@@ -748,6 +862,11 @@ github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+Gx
github.com/prometheus/prometheus v1.8.2-0.20200420081721-18254838fbe2 h1:JtWnHSHMC1h8mb6K5GsFzmhY/WMILsxQ4slsJu+lyg8=
github.com/prometheus/prometheus v1.8.2-0.20200420081721-18254838fbe2/go.mod h1:ZnfuiMn3LNsry2q7ECmRe4WcscxmJSd2dIFpOi4w3lM=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
+github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI=
+github.com/quasilyte/go-ruleguard v0.2.0 h1:UOVMyH2EKkxIfzrULvA9n/tO+HtEhqD9mrLSWMr5FwU=
+github.com/quasilyte/go-ruleguard v0.2.0/go.mod h1:2RT/tf0Ce0UDj5y243iWKosQogJd8+1G3Rs2fxmlYnw=
+github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95 h1:L8QM9bvf68pVdQ3bCFZMDmnt9yqcMBro1pC7F+IPYMY=
+github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0=
github.com/rakyll/statik v0.1.6 h1:uICcfUXpgqtw2VopbIncslhAmE5hwc4g20TEyEENBNs=
github.com/rakyll/statik v0.1.6/go.mod h1:OEi9wJV/fMUAGx1eNjq75DKDsJVuEv1U0oYdX6GX8Zs=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
@@ -759,6 +878,8 @@ github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6So
github.com/rogpeppe/fastuuid v1.1.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.5.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
+github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
@@ -766,6 +887,10 @@ github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/rveen/ogdl v0.0.0-20200522080342-eeeda1a978e7 h1:Lftq+hHvm0kPWM1sDNqx1jkXAo1zw2YceoFo1hdyj7I=
github.com/rveen/ogdl v0.0.0-20200522080342-eeeda1a978e7/go.mod h1:9fqUB54wJS9u5TSXJZhRfTdh1lXVxTytDjed7t2cNdw=
+github.com/ryancurrah/gomodguard v1.1.0 h1:DWbye9KyMgytn8uYpuHkwf0RHqAYO6Ay/D0TbCpPtVU=
+github.com/ryancurrah/gomodguard v1.1.0/go.mod h1:4O8tr7hBODaGE6VIhfJDHcwzh5GUccKSJBU0UMXJFVM=
+github.com/ryanrolds/sqlclosecheck v0.3.0 h1:AZx+Bixh8zdUBxUA1NxbxVAS78vTPq4rCb8OUZI9xFw=
+github.com/ryanrolds/sqlclosecheck v0.3.0/go.mod h1:1gREqxyTGR3lVtpngyFo3hZAgk0KCtEdgEkHwDbigdA=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
@@ -776,13 +901,20 @@ github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUt
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/seborama/govcr v2.2.1+incompatible h1:rELLpGxrv9ahY6zC5ruwNJtbNaSYsIC5VE9q7pI/+3I=
github.com/seborama/govcr v2.2.1+incompatible/go.mod h1:EgcISudCCYDLzbiAImJ8i7kk4+wTA44Kp+j4S0LhASI=
+github.com/securego/gosec/v2 v2.5.0 h1:kjfXLeKdk98gBe2+eYRFMpC4+mxmQQtbidpiiOQ69Qc=
+github.com/securego/gosec/v2 v2.5.0/go.mod h1:L/CDXVntIff5ypVHIkqPXbtRpJiNCh6c6Amn68jXDjo=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0=
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
+github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c h1:W65qqJCIOVP4jpqPQ0YvHYKwcMEMVWIzWC5iNQQfBTU=
+github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs=
+github.com/shirou/gopsutil v0.0.0-20190901111213-e4ec7b275ada/go.mod h1:WWnYX4lzhCH5h/3YBfyVA3VbLYjlMZZAQcW9ojMexNc=
github.com/shirou/gopsutil v2.17.13-0.20180801053943-8048a2e9c577+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/shirou/gopsutil v2.20.5+incompatible h1:tYH07UPoQt0OCQdgWWMgYHy3/a9bcxNpBIysykNIP7I=
github.com/shirou/gopsutil v2.20.5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc=
+github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
+github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
@@ -791,6 +923,7 @@ github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvH
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
@@ -802,7 +935,11 @@ github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9
github.com/smartystreets/gunit v1.0.0/go.mod h1:qwPWnhz6pn0NnRBP++URONOVyNkPyr4SauJk4cUOwJs=
github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+github.com/sonatard/noctx v0.0.1 h1:VC1Qhl6Oxx9vvWo3UDgrGXYCeKCe3Wbw7qAWL6FrmTY=
+github.com/sonatard/noctx v0.0.1/go.mod h1:9D2D/EoULe8Yy2joDHJj7bv3sZoq9AaSb8B4lqBjiZI=
github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
+github.com/sourcegraph/go-diff v0.6.1 h1:hmA1LzxW0n1c3Q4YbrFgg4P99GSnebYa3x8gr0HZqLQ=
+github.com/sourcegraph/go-diff v0.6.1/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
@@ -835,6 +972,8 @@ github.com/spf13/viper v1.7.1 h1:pM5oEahlgWv/WnHXpgbKz7iLIxRf65tye2Ci+XFK5sk=
github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/src-d/gcfg v1.4.0 h1:xXbNR5AlLSA315x2UO+fTSSAXCDf+Ar38/6oyGbDKQ4=
github.com/src-d/gcfg v1.4.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI=
+github.com/ssgreg/nlreturn/v2 v2.1.0 h1:6/s4Rc49L6Uo6RLjhWZGBpWWjfzk2yrf1nIW8m4wgVA=
+github.com/ssgreg/nlreturn/v2 v2.1.0/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I=
github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
@@ -850,7 +989,13 @@ github.com/subosito/gotenv v1.2.1-0.20190917103637-de67a6614a4d h1:YN4gX82mT31qs
github.com/subosito/gotenv v1.2.1-0.20190917103637-de67a6614a4d/go.mod h1:GVSeM7r0P1RI1gOKYyN9IuNkhMmQwKGsjVf3ulDrdzo=
github.com/tcnksm/go-gitconfig v0.1.2 h1:iiDhRitByXAEyjgBqsKi9QU4o2TNtv9kPP3RgPgXBPw=
github.com/tcnksm/go-gitconfig v0.1.2/go.mod h1:/8EhP4H7oJZdIPyT+/UIsG87kTzrzM4UsLGSItWYCpE=
+github.com/tdakkota/asciicheck v0.0.0-20200416190851-d7f85be797a2 h1:Xr9gkxfOP0KQWXKNqmwe8vEeSUiUj4Rlee9CMVX2ZUQ=
+github.com/tdakkota/asciicheck v0.0.0-20200416190851-d7f85be797a2/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM=
+github.com/tetafro/godot v1.3.0 h1:rKXb6aAz2AnwS98jYlU3snCFFXnIInQdaGiftNwpj+k=
+github.com/tetafro/godot v1.3.0/go.mod h1:/7NLHhv08H1+8DNj0MElpAACw1ajsCuf3TKNQxA5S+0=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
+github.com/timakin/bodyclose v0.0.0-20190930140734-f7f2e9bca95e h1:RumXZ56IrCj4CL+g1b9OL/oH0QnsF976bC8xQFYUD5Q=
+github.com/timakin/bodyclose v0.0.0-20190930140734-f7f2e9bca95e/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk=
github.com/tinylib/msgp v1.1.0 h1:9fQd+ICuRIu/ue4vxJZu6/LzxN0HwMds2nq/0cFvxHU=
github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
github.com/tj/assert v0.0.0-20171129193455-018094318fb0 h1:Rw8kxzWo1mr6FSaYXjQELRe88y2KdfynXdnK72rdjtA=
@@ -861,6 +1006,10 @@ github.com/tj/go-spin v1.1.0/go.mod h1:Mg1mzmePZm4dva8Qz60H2lHwmJ2loum4VIrLgVnKw
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/tomarrell/wrapcheck v0.0.0-20200807122107-df9e8bcb914d h1:3EZyvNUMsGD1QA8cu0STNn1L7I77rvhf2IhOcHYQhSw=
+github.com/tomarrell/wrapcheck v0.0.0-20200807122107-df9e8bcb914d/go.mod h1:yiFB6fFoV7saXirUGfuK+cPtUh4NX/Hf5y2WC2lehu0=
+github.com/tommy-muehle/go-mnd v1.3.1-0.20200224220436-e6f9a994e8fa h1:RC4maTWLKKwb7p1cnoygsbKIgNlJqSYBeAFON3Ar8As=
+github.com/tommy-muehle/go-mnd v1.3.1-0.20200224220436-e6f9a994e8fa/go.mod h1:dSUh0FtTP8VhvkL1S+gUR1OKd9ZnSaozuI6r3m6wOig=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
github.com/twmb/murmur3 v1.1.4 h1:NnlAxelwOgdQDmYuV0T/K+tpDQ/8wdsDVOGmvUqBOCw=
github.com/twmb/murmur3 v1.1.4/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ=
@@ -877,9 +1026,18 @@ github.com/uber/tchannel-go v1.14.0/go.mod h1:Rrgz1eL8kMjW/nEzZos0t+Heq0O4LhnUJV
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/ulikunitz/xz v0.5.5 h1:pFrO0lVpTBXLpYw+pnLj6TbvHuyjXMfjGeCwSqCVwok=
github.com/ulikunitz/xz v0.5.5/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8=
+github.com/ultraware/funlen v0.0.3 h1:5ylVWm8wsNwH5aWo9438pwvsK0QiqVuUrt9bn7S/iLA=
+github.com/ultraware/funlen v0.0.3/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA=
+github.com/ultraware/whitespace v0.0.4 h1:If7Va4cM03mpgrNH9k49/VOicWpGoG70XPBFFODYDsg=
+github.com/ultraware/whitespace v0.0.4/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.22.1 h1:+mkCCcOFKPnCmVYVcURKps1Xe+3zP90gSYGNfRkjoIY=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/uudashr/gocognit v1.0.1 h1:MoG2fZ0b/Eo7NXoIwCVFLG5JED3qgQz5/NEE+rOsjPs=
+github.com/uudashr/gocognit v1.0.1/go.mod h1:j44Ayx2KW4+oB6SWMv8KsmHzZrOInQav7D3cQMJ5JUM=
+github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
+github.com/valyala/fasthttp v1.16.0/go.mod h1:YOKImeEosDdBPnxc0gy7INqi3m1zK6A+xl6TwOBhHCA=
+github.com/valyala/quicktemplate v1.6.3/go.mod h1:fwPzK2fHuYEODzJ9pkw0ipCPNHZ2tD5KW4lOuSdPKzY=
github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a h1:0R4NLDRDZX6JcmhJgXi5E4b8Wg84ihbmUKp/GvSPEzc=
github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio=
github.com/vmihailenco/msgpack v2.8.3+incompatible h1:76LCLwxS08gKHRpGA10PBxfWk72JfUH6mgzp2+URwYM=
@@ -900,7 +1058,9 @@ github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
+github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0=
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
@@ -1011,6 +1171,8 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20200301022130-244492dfa37a h1:GuSPYbZzB5/dcLNCwLQLsg3obCJtX9IJhpXkvY7kzk0=
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -1042,15 +1204,21 @@ golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxb
golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190110163146-51295c7ec13a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190221204921-83362c3779f5/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190307163923-6a08e3108db3/go.mod h1:25r3+/G6/xytQM8iWZKq3Hn0kr0rgFKPUNVEL/dr3z4=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190311215038-5c2858a9cfe5/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190322203728-c1a832b0ad89/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
@@ -1064,6 +1232,7 @@ golang.org/x/tools v0.0.0-20190729092621-ff9f1409240a/go.mod h1:jcCCGcm9btYwXyDq
golang.org/x/tools v0.0.0-20190813034749-528a2984e271/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
@@ -1075,10 +1244,25 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20191203134012-c197fd4bf371/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200117220505-0cba7a3a9ee9/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200305205014-bc073721adb6/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200324003944-a576cf524670/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
golang.org/x/tools v0.0.0-20200410194907-79a7a3126eef/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200414032229-332987a829c3/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200422022333-3d57cf2e726e/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200426102838-f3a5411a4c3b h1:zSzQJAznWxAh9fZxiPy2FZo+ZZEYoYFYYDYdOrU7AaM=
golang.org/x/tools v0.0.0-20200426102838-f3a5411a4c3b/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200622203043-20e05c1c8ffa/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200624225443-88f3c62a19ff/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200625211823-6506e20df31f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200724022722-7017fd6b1305/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200731060945-b5fad4ed8dd6/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200812195022-5ae4c3c160a0/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200831203904-5a2aa26beb65/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
+golang.org/x/tools v0.0.0-20201001104356-43ebab892c4c/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU=
+golang.org/x/tools v0.0.0-20201002184944-ecd9fd270d5d/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU=
+golang.org/x/tools v0.0.0-20201007032633-0806396f153e/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU=
+golang.org/x/tools v0.0.0-20201011145850-ed2f50202694/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU=
golang.org/x/tools v0.0.0-20201013201025-64a9e34f3752 h1:2ntEwh02rqo2jSsrYmp4yKHHjh0CbXP3ZtSUetSB+q8=
golang.org/x/tools v0.0.0-20201013201025-64a9e34f3752/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -1217,6 +1401,14 @@ k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E=
k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
+mvdan.cc/gofumpt v0.0.0-20200802201014-ab5a8192947d h1:t8TAw9WgTLghti7RYkpPmqk4JtQ3+wcP5GgZqgWeWLQ=
+mvdan.cc/gofumpt v0.0.0-20200802201014-ab5a8192947d/go.mod h1:bzrjFmaD6+xqohD3KYP0H2FEuxknnBmyyOxdhLdaIws=
+mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed h1:WX1yoOaKQfddO/mLzdV4wptyWgoH/6hwLs7QHTixo0I=
+mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc=
+mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b h1:DxJ5nJdkhDlLok9K6qO+5290kphDJbHOQO1DFFFTeBo=
+mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4=
+mvdan.cc/unparam v0.0.0-20200501210554-b37ab49443f7 h1:kAREL6MPwpsk1/PQPFD3Eg7WAQR5mPTWZJaBiG5LDbY=
+mvdan.cc/unparam v0.0.0-20200501210554-b37ab49443f7/go.mod h1:HGC5lll35J70Y5v7vCGb9oLhHoScFwkHDJm/05RdSTc=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI=
sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
diff --git a/netlify.toml b/netlify.toml
index 5945b48124..25f08eec41 100644
--- a/netlify.toml
+++ b/netlify.toml
@@ -30,17 +30,17 @@
[[redirects]]
from = "/docs/how_to/single_node"
- to = "docs/quickstart"
+ to = "/docs/quickstart"
[[redirects]]
- from="docs/how_to/kubernetes"
- to="/docs/getting_started/kubernetes_cluster"
+ from="/docs/how_to/kubernetes"
+ to="/docs/cluster/kubernetes_cluster"
[[redirects]]
- from="docs/quickstart/kubernetes"
+ from="/docs/quickstart/kubernetes"
to="/docs/cluster/kubernetes_cluster"
# TODO: Fix this with new content type
[[redirects]]
from = "/talks"
- to = "docs/overview/media"
+ to = "/docs/overview/media"
\ No newline at end of file
diff --git a/scripts/docker-integration-tests/carbon/expected/a.b.json b/scripts/docker-integration-tests/carbon/expected/ab.json
similarity index 100%
rename from scripts/docker-integration-tests/carbon/expected/a.b.json
rename to scripts/docker-integration-tests/carbon/expected/ab.json
diff --git a/scripts/docker-integration-tests/carbon/expected/a.ba.json b/scripts/docker-integration-tests/carbon/expected/aba.json
similarity index 100%
rename from scripts/docker-integration-tests/carbon/expected/a.ba.json
rename to scripts/docker-integration-tests/carbon/expected/aba.json
diff --git a/scripts/docker-integration-tests/carbon/expected/a.b.c.json b/scripts/docker-integration-tests/carbon/expected/abc.json
similarity index 100%
rename from scripts/docker-integration-tests/carbon/expected/a.b.c.json
rename to scripts/docker-integration-tests/carbon/expected/abc.json
diff --git a/scripts/docker-integration-tests/carbon/expected/a.b.c.d.json b/scripts/docker-integration-tests/carbon/expected/abcd.json
similarity index 100%
rename from scripts/docker-integration-tests/carbon/expected/a.b.c.d.json
rename to scripts/docker-integration-tests/carbon/expected/abcd.json
diff --git a/scripts/docker-integration-tests/carbon/expected/dbaz.json b/scripts/docker-integration-tests/carbon/expected/dbaz.json
new file mode 100644
index 0000000000..7b1405f615
--- /dev/null
+++ b/scripts/docker-integration-tests/carbon/expected/dbaz.json
@@ -0,0 +1,9 @@
+[
+ {
+ "id": "d.bar.baz",
+ "text": "baz",
+ "leaf": 1,
+ "expandable": 0,
+ "allowChildren": 0
+ }
+]
diff --git a/scripts/docker-integration-tests/carbon/expected/ebaz.json b/scripts/docker-integration-tests/carbon/expected/ebaz.json
new file mode 100644
index 0000000000..2409cc9539
--- /dev/null
+++ b/scripts/docker-integration-tests/carbon/expected/ebaz.json
@@ -0,0 +1,9 @@
+[
+ {
+ "id": "e.bar.baz",
+ "text": "baz",
+ "leaf": 1,
+ "expandable": 0,
+ "allowChildren": 0
+ }
+]
diff --git a/scripts/docker-integration-tests/carbon/expected/fbaz.json b/scripts/docker-integration-tests/carbon/expected/fbaz.json
new file mode 100644
index 0000000000..a5ef84b312
--- /dev/null
+++ b/scripts/docker-integration-tests/carbon/expected/fbaz.json
@@ -0,0 +1,9 @@
+[
+ {
+ "id": "f.bar.baz",
+ "text": "baz",
+ "leaf": 1,
+ "expandable": 0,
+ "allowChildren": 0
+ }
+]
diff --git a/scripts/docker-integration-tests/carbon/expected/gbaz.json b/scripts/docker-integration-tests/carbon/expected/gbaz.json
new file mode 100644
index 0000000000..6b073b6f80
--- /dev/null
+++ b/scripts/docker-integration-tests/carbon/expected/gbaz.json
@@ -0,0 +1,9 @@
+[
+ {
+ "id": "g.bar.baz",
+ "text": "baz",
+ "leaf": 1,
+ "expandable": 0,
+ "allowChildren": 0
+ }
+]
diff --git a/scripts/docker-integration-tests/carbon/expected/hbarbaz.json b/scripts/docker-integration-tests/carbon/expected/hbarbaz.json
new file mode 100644
index 0000000000..dd9c581125
--- /dev/null
+++ b/scripts/docker-integration-tests/carbon/expected/hbarbaz.json
@@ -0,0 +1,9 @@
+[
+ {
+ "id": "h.bar_baz",
+ "text": "bar_baz",
+ "leaf": 1,
+ "expandable": 0,
+ "allowChildren": 0
+ }
+]
diff --git a/scripts/docker-integration-tests/carbon/expected/ibarbaz.json b/scripts/docker-integration-tests/carbon/expected/ibarbaz.json
new file mode 100644
index 0000000000..7832da2263
--- /dev/null
+++ b/scripts/docker-integration-tests/carbon/expected/ibarbaz.json
@@ -0,0 +1,9 @@
+[
+ {
+ "id": "i.bar_baz",
+ "text": "bar_baz",
+ "leaf": 1,
+ "expandable": 0,
+ "allowChildren": 0
+ }
+]
diff --git a/scripts/docker-integration-tests/carbon/m3coordinator.yml b/scripts/docker-integration-tests/carbon/m3coordinator.yml
index dcff5f3a08..f0180b4e7e 100644
--- a/scripts/docker-integration-tests/carbon/m3coordinator.yml
+++ b/scripts/docker-integration-tests/carbon/m3coordinator.yml
@@ -14,6 +14,8 @@ clusters:
carbon:
ingester:
listenAddress: "0.0.0.0:7204"
+ rewrite:
+ cleanup: true
rules:
- pattern: .*min.aggregate.*
aggregation:
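The new `rewrite.cleanup` ingester option pairs with the rewrite test cases added to `test.sh` below: repeated dots collapse to one, leading/trailing dots are trimmed, and runs of unsupported characters become a single underscore (`d..bar.baz` → `d.bar.baz`, `h.bar@@baz` → `h.bar_baz`). A minimal Go sketch of that normalization, inferred from the expected test outputs rather than taken from the ingester's actual implementation:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

var (
	// Runs of characters outside the set the tests preserve (":" survives,
	// per the c:bar.c:baz cases) collapse to a single underscore.
	badChars = regexp.MustCompile(`[^A-Za-z0-9_:\-.]+`)
	// Runs of two or more dots collapse to a single dot.
	multiDots = regexp.MustCompile(`\.\.+`)
)

func cleanupCarbonName(name string) string {
	name = badChars.ReplaceAllString(name, "_")
	name = multiDots.ReplaceAllString(name, ".")
	return strings.Trim(name, ".") // drop leading/trailing dots
}

func main() {
	for _, in := range []string{"d..bar.baz", "..f.bar.baz", "g.bar.baz..", "h.bar@@baz"} {
		// Prints: d.bar.baz, f.bar.baz, g.bar.baz, h.bar_baz
		fmt.Println(in, "->", cleanupCarbonName(in))
	}
}
```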
diff --git a/scripts/docker-integration-tests/carbon/test.sh b/scripts/docker-integration-tests/carbon/test.sh
index 3094318a62..993aef6b3f 100755
--- a/scripts/docker-integration-tests/carbon/test.sh
+++ b/scripts/docker-integration-tests/carbon/test.sh
@@ -53,7 +53,7 @@ t=$(date +%s)
echo "foo.min.aggregate.baz 41 $t" | nc 0.0.0.0 7204
echo "foo.min.aggregate.baz 42 $t" | nc 0.0.0.0 7204
echo "Attempting to read min aggregated carbon metric"
-ATTEMPTS=20 MAX_TIMEOUT=4 TIMEOUT=1 retry_with_backoff read_carbon foo.min.aggregate.baz 41
+ATTEMPTS=20 MAX_TIMEOUT=4 TIMEOUT=1 retry_with_backoff "read_carbon 'foo.min.aggregate.baz' 41"
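+# NB: the command is now passed to retry_with_backoff as a single quoted
+# string, presumably so the helper can re-evaluate the whole command
+# (arguments included) on each retry instead of receiving pre-split words.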
echo "Writing out a carbon metric that should not be aggregated"
t=$(date +%s)
@@ -64,7 +64,7 @@ t=$(date +%s)
echo "foo.min.already-aggregated.baz 42 $t" | nc 0.0.0.0 7204
echo "foo.min.already-aggregated.baz 43 $t" | nc 0.0.0.0 7204
echo "Attempting to read unaggregated carbon metric"
-ATTEMPTS=20 MAX_TIMEOUT=4 TIMEOUT=1 retry_with_backoff read_carbon foo.min.already-aggregated.baz 43
+ATTEMPTS=20 MAX_TIMEOUT=4 TIMEOUT=1 retry_with_backoff "read_carbon 'foo.min.already-aggregated.baz' 43"
echo "Writing out a carbon metric that should should use the default mean aggregation"
t=$(date +%s)
@@ -72,19 +72,35 @@ t=$(date +%s)
echo "foo.min.catch-all.baz 10 $t" | nc 0.0.0.0 7204
echo "foo.min.catch-all.baz 20 $t" | nc 0.0.0.0 7204
echo "Attempting to read mean aggregated carbon metric"
-ATTEMPTS=20 MAX_TIMEOUT=4 TIMEOUT=1 retry_with_backoff read_carbon foo.min.catch-all.baz 15
+ATTEMPTS=20 MAX_TIMEOUT=4 TIMEOUT=1 retry_with_backoff "read_carbon 'foo.min.catch-all.baz' 15"
# Test writing and reading IDs with colons in them.
t=$(date +%s)
echo "foo.bar:baz.qux 42 $t" | nc 0.0.0.0 7204
-ATTEMPTS=20 MAX_TIMEOUT=4 TIMEOUT=1 retry_with_backoff read_carbon 'foo.bar:*.*' 42
+ATTEMPTS=20 MAX_TIMEOUT=4 TIMEOUT=1 retry_with_backoff "read_carbon 'foo.bar:*.*' 42"
# Test writing and reading IDs with a single element.
t=$(date +%s)
echo "quail 42 $t" | nc 0.0.0.0 7204
-ATTEMPTS=20 MAX_TIMEOUT=4 TIMEOUT=1 retry_with_backoff read_carbon 'quail' 42
+ATTEMPTS=20 MAX_TIMEOUT=4 TIMEOUT=1 retry_with_backoff "read_carbon 'quail' 42"
+# Test using "**" in queries
t=$(date +%s)
+echo "qux.pos1-a.pos2-0 1 $t" | nc 0.0.0.0 7204
+echo "qux.pos1-a.pos2-1 1 $t" | nc 0.0.0.0 7204
+echo "qux.pos1-b.pos2-0 1 $t" | nc 0.0.0.0 7204
+echo "qux.pos1-b.pos2-1 1 $t" | nc 0.0.0.0 7204
+echo "qux.pos1-c.pos2-0 1 $t" | nc 0.0.0.0 7204
+echo "qux.pos1-c.pos2-1 1 $t" | nc 0.0.0.0 7204
+ATTEMPTS=20 MAX_TIMEOUT=4 TIMEOUT=1 retry_with_backoff "read_carbon 'sum(qux**)' 6"
+ATTEMPTS=2 MAX_TIMEOUT=4 TIMEOUT=1 retry_with_backoff "read_carbon 'sum(qux.pos1-a**)' 2"
+ATTEMPTS=2 MAX_TIMEOUT=4 TIMEOUT=1 retry_with_backoff "read_carbon 'sum(**pos1-a**)' 2"
+ATTEMPTS=2 MAX_TIMEOUT=4 TIMEOUT=1 retry_with_backoff "read_carbon 'sum(**pos2-1**)' 3"
+ATTEMPTS=2 MAX_TIMEOUT=4 TIMEOUT=1 retry_with_backoff "read_carbon 'sum(**pos2-1)' 3"
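+# Per the expectations above, "**" matches across node (dot) boundaries:
+# 'qux**' covers all six series written above (sum 6), 'qux.pos1-a**' only
+# the two pos1-a series (sum 2), and '**pos2-1' the three pos2-1 series.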
+
+t=$(date +%s)
+
+# Test basic cases
echo "a 0 $t" | nc 0.0.0.0 7204
echo "a.bar 0 $t" | nc 0.0.0.0 7204
echo "a.biz 0 $t" | nc 0.0.0.0 7204
@@ -92,13 +108,32 @@ echo "a.biz.cake 0 $t" | nc 0.0.0.0 7204
echo "a.bar.caw.daz 0 $t" | nc 0.0.0.0 7204
echo "a.bag 0 $t" | nc 0.0.0.0 7204
echo "c:bar.c:baz 0 $t" | nc 0.0.0.0 7204
-ATTEMPTS=10 TIMEOUT=1 retry_with_backoff find_carbon a* a.json
-ATTEMPTS=2 TIMEOUT=1 retry_with_backoff find_carbon a.b* a.b.json
-ATTEMPTS=2 TIMEOUT=1 retry_with_backoff find_carbon a.ba[rg] a.ba.json
-ATTEMPTS=2 TIMEOUT=1 retry_with_backoff find_carbon a.b*.c* a.b.c.json
-ATTEMPTS=2 TIMEOUT=1 retry_with_backoff find_carbon a.b*.caw.* a.b.c.d.json
-ATTEMPTS=2 TIMEOUT=1 retry_with_backoff find_carbon x none.json
-ATTEMPTS=2 TIMEOUT=1 retry_with_backoff find_carbon a.d none.json
-ATTEMPTS=2 TIMEOUT=1 retry_with_backoff find_carbon *.*.*.*.* none.json
-ATTEMPTS=2 TIMEOUT=1 retry_with_backoff find_carbon c:* cbar.json
-ATTEMPTS=2 TIMEOUT=1 retry_with_backoff find_carbon c:bar.* cbaz.json
+
+# Test rewrite multiple dots
+echo "d..bar.baz 0 $t" | nc 0.0.0.0 7204
+echo "e.bar...baz 0 $t" | nc 0.0.0.0 7204
+
+# Test rewrite leading or trailing dots
+echo "..f.bar.baz 0 $t" | nc 0.0.0.0 7204
+echo "g.bar.baz.. 0 $t" | nc 0.0.0.0 7204
+
+# Test rewrite bad chars
+echo "h.bar@@baz 0 $t" | nc 0.0.0.0 7204
+echo "i.bar!!baz 0 $t" | nc 0.0.0.0 7204
+
+ATTEMPTS=10 TIMEOUT=1 retry_with_backoff "find_carbon 'a*' a.json"
+ATTEMPTS=2 TIMEOUT=1 retry_with_backoff "find_carbon 'a.b*' ab.json"
+ATTEMPTS=2 TIMEOUT=1 retry_with_backoff "find_carbon 'a.ba[rg]' aba.json"
+ATTEMPTS=2 TIMEOUT=1 retry_with_backoff "find_carbon 'a.b*.c*' abc.json"
+ATTEMPTS=2 TIMEOUT=1 retry_with_backoff "find_carbon 'a.b*.caw.*' abcd.json"
+ATTEMPTS=2 TIMEOUT=1 retry_with_backoff "find_carbon 'x' none.json"
+ATTEMPTS=2 TIMEOUT=1 retry_with_backoff "find_carbon 'a.d' none.json"
+ATTEMPTS=2 TIMEOUT=1 retry_with_backoff "find_carbon '*.*.*.*.*' none.json"
+ATTEMPTS=2 TIMEOUT=1 retry_with_backoff "find_carbon 'c:*' cbar.json"
+ATTEMPTS=2 TIMEOUT=1 retry_with_backoff "find_carbon 'c:bar.*' cbaz.json"
+ATTEMPTS=2 TIMEOUT=1 retry_with_backoff "find_carbon 'd.bar.*' dbaz.json"
+ATTEMPTS=2 TIMEOUT=1 retry_with_backoff "find_carbon 'e.bar.*' ebaz.json"
+ATTEMPTS=2 TIMEOUT=1 retry_with_backoff "find_carbon 'f.bar.*' fbaz.json"
+ATTEMPTS=2 TIMEOUT=1 retry_with_backoff "find_carbon 'g.bar.*' gbaz.json"
+ATTEMPTS=2 TIMEOUT=1 retry_with_backoff "find_carbon 'h.bar*' hbarbaz.json"
+ATTEMPTS=2 TIMEOUT=1 retry_with_backoff "find_carbon 'i.bar*' ibarbaz.json"
diff --git a/scripts/docker-integration-tests/coordinator_noop/test.sh b/scripts/docker-integration-tests/coordinator_noop/test.sh
index e6e244b11a..9a3a746c4f 100755
--- a/scripts/docker-integration-tests/coordinator_noop/test.sh
+++ b/scripts/docker-integration-tests/coordinator_noop/test.sh
@@ -43,7 +43,7 @@ if ! curl -vvvsSf localhost:7201/api/v1/services/m3coordinator/placement; then
exit 1
fi
-QUERY_EXP='{"error":"operation not valid for noop client"}'
+QUERY_EXP='{"status":"error","error":"operation not valid for noop client"}'
RES=$(curl "localhost:7201/m3query/api/v1/query_range?start=$(date '+%s')&end=$(date '+%s')&step=10&query=foo")
if [[ "$RES" != "$QUERY_EXP" ]]; then
echo "Expected resp '$QUERY_EXP', GOT '$RES'"
diff --git a/site/config/_default/config.toml b/site/config/_default/config.toml
index 9b00108ee0..1f504b611b 100644
--- a/site/config/_default/config.toml
+++ b/site/config/_default/config.toml
@@ -32,7 +32,6 @@
target = "archetypes"
ignoreFiles = [ "\\.ttf$", "\\.woff$", "\\.woff2$", "\\.eot$" ]
-enableRobotsTXT = true
metaDataFormat = "yaml"
# theme = "docs-theme"
# baseURL = "/"
@@ -44,8 +43,8 @@ defaultContentLanguageInSubdir = true
[languages]
[languages.en]
-title = "Kubernetes"
-description = "Production-Grade Container Orchestration"
+title = "M3 Documentation"
+description = "M3 is a Prometheus compatible, easy to adopt metrics engine that provides visibility for some of the world’s largest brands."
languageName ="English"
# Weight used for sorting.
weight = 1
@@ -110,9 +109,10 @@ offlineSearch = false
# Prefix URL to edit current page. Will display an "Edit this page" button on top right hand corner of every page.
# Useful to give opportunity to people to create merge request for your doc.
# See the config.toml file from this documentation site to have an example.
- # TODO: pattern to branch?
- # TODO: bring back
- # editURL = "https://github.com/m3db/m3/tree/master/site/content/"
+
+ helpBlock = true
+ editURL = "https://github.com/m3db/m3/tree/master/site/content/"
+
# Author of the site, will be used in meta information
author = "The M3 team and community"
# Description of the site, will be used in meta information
diff --git a/site/config/production/config.toml b/site/config/production/config.toml
index 0c854d1dae..3110a123fa 100644
--- a/site/config/production/config.toml
+++ b/site/config/production/config.toml
@@ -52,6 +52,7 @@ ignoreFiles = [ "\\.ttf$", "\\.woff$", "\\.woff2$", "\\.eot$" ]
languageCode = "en-US"
defaultContentLanguage = "en"
# staticDir = ["static"]
+enableRobotsTXT = true
metaDataFormat = "yaml"
defaultContentLanguageInSubdir= true
@@ -101,11 +102,7 @@ offlineSearch = false
# Prefix URL to edit current page. Will display an "Edit this page" button on top right hand corner of every page.
# Useful to give opportunity to people to create merge request for your doc.
# See the config.toml file from this documentation site to have an example.
- # TODO: pattern to branch?
- # TODO: bring back
- # editURL = "https://github.com/m3db/m3/tree/master/site/content/"
- # Shows a checkmark for visited pages on the menu
- showVisitedLinks = false
+
# Disable search function. It will hide search bar
disableSearch = false
# Javascript and CSS cache are automatically busted when new version of site is generated.
@@ -124,7 +121,7 @@ offlineSearch = false
# Order sections in menu by "weight" or "title". Default to "weight"
ordersectionsby = "weight"
# Change default color scheme with a variant one. Can be "red", "blue", "green".
- themeVariant = "blue"
+ # themeVariant = "blue"
twitter = "m3db_io"
disableHomeIcon = true
diff --git a/site/content/cluster/binaries_cluster.md b/site/content/cluster/binaries_cluster.md
index b95a87306b..5c2ac410b1 100644
--- a/site/content/cluster/binaries_cluster.md
+++ b/site/content/cluster/binaries_cluster.md
@@ -1,10 +1,9 @@
---
linktitle: "Binaries"
+title: Creating an M3 Cluster with Binaries
weight: 2
---
-# Creating an M3 Cluster with Binaries
-
This guide shows you the steps involved in creating an M3 cluster using M3 binaries; typically, you would automate this with infrastructure-as-code tools such as Terraform or [Kubernetes](/docs/operator).
{{% notice note %}}
diff --git a/site/content/cluster/kubernetes_cluster.md b/site/content/cluster/kubernetes_cluster.md
index 91ae49e0a3..a58314cd22 100644
--- a/site/content/cluster/kubernetes_cluster.md
+++ b/site/content/cluster/kubernetes_cluster.md
@@ -1,10 +1,9 @@
---
linktitle: "Kubernetes"
+title: Creating an M3 Cluster with Kubernetes
weight: 1
---
-# Creating an M3 Cluster with Kubernetes
-
This guide shows you how to create an M3 cluster of 3 nodes, designed to run locally on the same machine. It is designed to show you how M3 and Kubernetes can work together, but not as a production example.
{{% notice note %}}
diff --git a/site/content/faqs/_index.md b/site/content/faqs/_index.md
index d9b74add0a..21b863d945 100644
--- a/site/content/faqs/_index.md
+++ b/site/content/faqs/_index.md
@@ -1,80 +1,5 @@
----
-title: "FAQs"
-weight: 9
-chapter: true
----
-
-- **Is there a way to disable M3DB embedded `etcd` and just use an external `etcd` cluster?**
-Yes, you can definitely do that. It's all just about setting the etcd endpoints in config as etcd hosts instead of M3DB hosts. See [these docs](/docs/operational_guide/etcd#external-etcd) for more information on configuring an external `etcd` cluster.
-
-- **Is there a client that lets me send metrics to m3coordinator without going through Prometheus?**
-Yes, you can use the [Prometheus remote write client](https://github.com/m3db/prometheus_remote_client_golang/).
-
-- **Why does my dbnode keep OOM’ing?**
-Refer to the [troubleshooting guide](/docs/troubleshooting).
-
-- **Do you support PromQL?**
-Yes, M3Query and M3Coordinator both support PromQL.
-
-- **Do you support Graphite?**
-Yes, M3Query and M3Coordinator both support Graphite.
-
-- **Does M3DB store both data and (tag) metadata on the same node?**
-Yes it stores the data (i.e. the timeseries datapoints) as well as the tags since it has an embedded index. Make sure you have `IndexEnabled` set to `true` in your namespace configuration
-
-- **How are writes handled and how is the data kept consistent within M3DB?**
-M3 uses quorum/majority consistency to ensure data is written to replicas in a way that can be read back consistently.
-For example, if you have a replication factor of 3 and you set your write and read consistencies to quorum, then all writes will only succeed if they make it to at least 2 of the 3 replicas, and reads will only succeed if they get results back from at least 2 of the 3 replicas
-
-- **Do I need to restart M3DB if I add a namespace?**
-If you’re adding namespaces, the m3dbnode process will pickup the new namespace without a restart.
-
-- **Do I need to restart M3DB if I change or delete a namespace?**
-If you’re removing or modifying an existing namespace, you’ll need to restart the m3dbnode process in order to complete the namespace deletion/modification process. It is recommended to restart one node at a time and wait for a node to be completely bootstrapped before restarting another node.
-
-- **How do I set up aggregation in the coordinator?**
-Refer to the [Aggregation section](/docs/how_to/query) of the M3Query how-to guide.
-
-- **How do I set up aggregation using a separate aggregation tier?**
-See this [WIP documentation](https://github.com/m3db/m3/pull/1741/files#diff-0a1009f86783ca8fd4499418e556c6f5).
-
-- **Can you delete metrics from M3DB?**
-Not yet, but that functionality is currently being worked on.
-
-- **How can you tell if a node is snapshotting?**
-You can check if your nodes are snapshotting by looking at the `Background tasks` tab in the [M3DB Grafana dashboard](https://grafana.com/dashboards/8126).
-
-- **How do you list all available API endpoints?**
-See [M3DB OpenAPI](https://m3db.io/openapi).
-
-- **What is the recommended way to upgrade my M3 stack?**
-See the [Upgrading M3](/docs/operational_guide/upgrading_m3) guide.
-
-- **When graphing my Prometheus data in Grafana, I see gaps. How do I resolve this?**
-This is due to M3 having a concept of `null` datapoints whereas Prometheus does not. To resolve this, change `Stacking & Null value` to `Connected` under the `Visualization` tab of your graph.
-
-- **I am receiving the error `"could not create etcd watch","error":"etcd watch create timed out after 10s for key: _sd.placement/default_env/m3db"`**
-This is due to the fact that M3DB, M3Coordinator, etc. could not connect to the `etcd` server. Make sure that the endpoints listed under in the following config section are correct AND the correct configuration file is being used.
-```yaml
-etcdClusters:
- - zone: embedded
- endpoints:
- - HOST1_STATIC_IP_ADDRESS:2379
- - HOST2_STATIC_IP_ADDRESS:2379
- - HOST3_STATIC_IP_ADDRESS:2379
-```
-
-- **How can I get a heap dump, cpu profile, etc.**
-See our docs on the [/debug/dump api](/docs/troubleshooting)
-
-- **How much memory utilization should I run M3DB at?**
-We recommend not going above 50%.
-
-- **What is the recommended hardware to run on?**
-TBA
-
-- **What is the recommended way to create a new namespace?**
-Refer to the [Namespace configuration guide](/docs/operational_guide/namespace_configuration).
-
-- **How can I see the cardinality of my metrics?**
-Currently, the best way is to go to the [M3DB Node Details Dashboard](https://grafana.com/grafana/dashboards/8126) and look at the `Ticking` panel. However, this is not entirely accurate because of the way data is stored in M3DB -- time series are stored inside time-based blocks that you configure. In actuality, the `Ticking` graph shows you how many unique series there are for the most recent block that has persisted. In the future, we plan to introduce easier ways to determine the number of unique time series.
++++
+title = "FAQs"
+weight = 9
+chapter = true
++++
\ No newline at end of file
diff --git a/site/content/faqs/faqs.md b/site/content/faqs/faqs.md
new file mode 100644
index 0000000000..027bacd784
--- /dev/null
+++ b/site/content/faqs/faqs.md
@@ -0,0 +1,80 @@
+---
+title: "Operational"
+weight: 9
+chapter: true
+---
+
+- **Is there a way to disable M3DB embedded `etcd` and just use an external `etcd` cluster?**
+Yes. Set the etcd endpoints in your configuration to the external `etcd` hosts instead of the M3DB hosts. See [these docs](/docs/operational_guide/etcd#external-etcd) for more information on configuring an external `etcd` cluster.
+
+- **Is there a client that lets me send metrics to m3coordinator without going through Prometheus?**
+Yes, you can use the [Prometheus remote write client](https://github.com/m3db/prometheus_remote_client_golang/).
+
+- **Why does my dbnode keep OOM’ing?**
+Refer to the [troubleshooting guide](/docs/faqs/troubleshooting).
+
+- **Do you support PromQL?**
+Yes, M3Query and M3Coordinator both support PromQL.
+
+- **Do you support Graphite?**
+Yes, M3Query and M3Coordinator both support Graphite.
+
+- **Does M3DB store both data and (tag) metadata on the same node?**
+Yes, it stores the data (i.e., the timeseries datapoints) as well as the tags, since it has an embedded index. Make sure you have `IndexEnabled` set to `true` in your namespace configuration.
+
+- **How are writes handled and how is the data kept consistent within M3DB?**
+M3 uses quorum/majority consistency to ensure data is written to replicas in a way that can be read back consistently.
+For example, if you have a replication factor of 3 and you set your write and read consistencies to quorum, then all writes will only succeed if they make it to at least 2 of the 3 replicas, and reads will only succeed if they get results back from at least 2 of the 3 replicas (see the sketch below).
+
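+A minimal sketch of that majority arithmetic in Go (the `quorum` helper is illustrative, not an M3 API):
+
+```go
+// quorum returns the majority count for a replication factor,
+// e.g. quorum(3) == 2: quorum writes/reads need acks from 2 of 3 replicas.
+func quorum(replicationFactor int) int {
+	return replicationFactor/2 + 1
+}
+```
+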
+- **Do I need to restart M3DB if I add a namespace?**
+If you’re adding namespaces, the m3dbnode process will pick up the new namespace without a restart.
+
+- **Do I need to restart M3DB if I change or delete a namespace?**
+If you’re removing or modifying an existing namespace, you’ll need to restart the m3dbnode process in order to complete the namespace deletion/modification process. It is recommended to restart one node at a time and wait for a node to be completely bootstrapped before restarting another node.
+
+- **How do I set up aggregation in the coordinator?**
+Refer to the [Aggregation section](/docs/how_to/query) of the M3Query how-to guide.
+
+- **How do I set up aggregation using a separate aggregation tier?**
+See this [WIP documentation](https://github.com/m3db/m3/pull/1741/files#diff-0a1009f86783ca8fd4499418e556c6f5).
+
+- **Can you delete metrics from M3DB?**
+Not yet, but that functionality is currently being worked on.
+
+- **How can you tell if a node is snapshotting?**
+You can check if your nodes are snapshotting by looking at the `Background tasks` tab in the [M3DB Grafana dashboard](https://grafana.com/dashboards/8126).
+
+- **How do you list all available API endpoints?**
+See [M3DB OpenAPI](https://m3db.io/openapi).
+
+- **What is the recommended way to upgrade my M3 stack?**
+See the [Upgrading M3](/docs/operational_guide/upgrading_m3) guide.
+
+- **When graphing my Prometheus data in Grafana, I see gaps. How do I resolve this?**
+This is due to M3 having a concept of `null` datapoints whereas Prometheus does not. To resolve this, change `Stacking & Null value` to `Connected` under the `Visualization` tab of your graph.
+
+- **I am receiving the error `"could not create etcd watch","error":"etcd watch create timed out after 10s for key: _sd.placement/default_env/m3db"`**
+This is due to the fact that M3DB, M3Coordinator, etc. could not connect to the `etcd` server. Make sure that the endpoints listed in the following config section are correct AND that the correct configuration file is being used.
+```yaml
+etcdClusters:
+ - zone: embedded
+ endpoints:
+ - HOST1_STATIC_IP_ADDRESS:2379
+ - HOST2_STATIC_IP_ADDRESS:2379
+ - HOST3_STATIC_IP_ADDRESS:2379
+```
+
+- **How can I get a heap dump, CPU profile, etc.?**
+See our docs on the [/debug/dump api](/docs/faqs/troubleshooting)
+
+- **How much memory utilization should I run M3DB at?**
+We recommend not going above 50%.
+
+- **What is the recommended hardware to run on?**
+TBA
+
+- **What is the recommended way to create a new namespace?**
+Refer to the [Namespace configuration guide](/docs/operational_guide/namespace_configuration).
+
+- **How can I see the cardinality of my metrics?**
+Currently, the best way is to go to the [M3DB Node Details Dashboard](https://grafana.com/grafana/dashboards/8126) and look at the `Ticking` panel. However, this is not entirely accurate because of the way data is stored in M3DB -- time series are stored inside time-based blocks that you configure. In actuality, the `Ticking` graph shows you how many unique series there are for the most recent block that has persisted. In the future, we plan to introduce easier ways to determine the number of unique time series.
diff --git a/site/content/troubleshooting/_index.md b/site/content/faqs/troubleshooting.md
similarity index 100%
rename from site/content/troubleshooting/_index.md
rename to site/content/faqs/troubleshooting.md
diff --git a/site/content/quickstart/binaries.md b/site/content/quickstart/binaries.md
index ba0ba10730..61da32c74e 100644
--- a/site/content/quickstart/binaries.md
+++ b/site/content/quickstart/binaries.md
@@ -1,12 +1,9 @@
---
linktitle: "Binaries"
+title: Creating a Single Node M3 Cluster with Binaries
weight: 3
---
-
-
-# Creating a Single Node M3 Cluster with Binaries
-
This guide shows how to install and configure M3, create a single-node cluster, and read and write metrics to it.
{{% notice warning %}}
diff --git a/site/go.mod b/site/go.mod
index d796614d48..c51383b444 100644
--- a/site/go.mod
+++ b/site/go.mod
@@ -2,4 +2,4 @@ module m3-site
go 1.15
-require github.com/chronosphereio/victor v0.0.0-20201217171243-b2ba7c848932 // indirect
+require github.com/chronosphereio/victor v0.0.0-20201222112852-eaf9ae24e2db // indirect
diff --git a/site/go.sum b/site/go.sum
index c73280fa61..14aceadf1b 100644
--- a/site/go.sum
+++ b/site/go.sum
@@ -1,2 +1,4 @@
github.com/chronosphereio/victor v0.0.0-20201217171243-b2ba7c848932 h1:NcHWcnCIEo2XaIXoTospzTazsu0PgDvcbXX+47cy8/I=
github.com/chronosphereio/victor v0.0.0-20201217171243-b2ba7c848932/go.mod h1:wz1ngMsk+1D1ug2ObnI3zXs+/ZdBPrWLb6R1WQW3XNM=
+github.com/chronosphereio/victor v0.0.0-20201222112852-eaf9ae24e2db h1:qEMGd5zaqbT8eaKJ25n99rgakWgUhX3xnsLae1N6Si8=
+github.com/chronosphereio/victor v0.0.0-20201222112852-eaf9ae24e2db/go.mod h1:wz1ngMsk+1D1ug2ObnI3zXs+/ZdBPrWLb6R1WQW3XNM=
diff --git a/site/layouts/404.html b/site/layouts/404.html
new file mode 100644
index 0000000000..f22174a445
--- /dev/null
+++ b/site/layouts/404.html
@@ -0,0 +1,234 @@
+<!-- 234-line HTML page; markup lost in extraction. Recoverable visible text: -->
+M3: Open Source Metrics Engine
+404:
+Page not found
+Sorry, but the page you were trying to view does not exist.
+Return to homepage
\ No newline at end of file
diff --git a/site/static/robots.txt b/site/static/robots.txt
new file mode 100644
index 0000000000..4f9540ba35
--- /dev/null
+++ b/site/static/robots.txt
@@ -0,0 +1 @@
+User-agent: *
\ No newline at end of file
diff --git a/src/aggregator/client/conn_options.go b/src/aggregator/client/conn_options.go
index 7a85b67581..71b5a89886 100644
--- a/src/aggregator/client/conn_options.go
+++ b/src/aggregator/client/conn_options.go
@@ -117,15 +117,15 @@ type ConnectionOptions interface {
type connectionOptions struct {
clockOpts clock.Options
instrumentOpts instrument.Options
+ writeRetryOpts retry.Options
+ rwOpts xio.Options
connTimeout time.Duration
- connKeepAlive bool
writeTimeout time.Duration
+ maxDuration time.Duration
initThreshold int
maxThreshold int
multiplier int
- maxDuration time.Duration
- writeRetryOpts retry.Options
- rwOpts xio.Options
+ connKeepAlive bool
}
// NewConnectionOptions create a new set of connection options.
diff --git a/src/aggregator/client/payload.go b/src/aggregator/client/payload.go
index ffc2722450..a9c6b6f5a9 100644
--- a/src/aggregator/client/payload.go
+++ b/src/aggregator/client/payload.go
@@ -55,8 +55,8 @@ type timedPayload struct {
}
type timedWithStagedMetadatas struct {
- metric aggregated.Metric
metadatas metadata.StagedMetadatas
+ metric aggregated.Metric
}
type passthroughPayload struct {
@@ -65,10 +65,10 @@ type passthroughPayload struct {
}
type payloadUnion struct {
- payloadType payloadType
- untimed untimedPayload
forwarded forwardedPayload
+ untimed untimedPayload
timed timedPayload
timedWithStagedMetadatas timedWithStagedMetadatas
passthrough passthroughPayload
+ payloadType payloadType
}
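These struct field reorderings (here and in the neighboring aggregator files) look like fieldalignment fixes: widest fields first, bools last, so the compiler inserts less padding. A standalone sketch of the effect on a 64-bit platform (the types are illustrative, not from this codebase):

```go
package main

import (
	"fmt"
	"unsafe"
)

// padded interleaves 1-byte bools with an 8-byte field, forcing 7 bytes
// of padding before the int64 and 7 more at the tail.
type padded struct {
	b1 bool
	a  int64
	b2 bool
}

// packed orders the same fields widest-first, so only the tail is padded.
type packed struct {
	a      int64
	b1, b2 bool
}

func main() {
	fmt.Println(unsafe.Sizeof(padded{}), unsafe.Sizeof(packed{})) // 24 16
}
```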
diff --git a/src/aggregator/client/ref_count.go b/src/aggregator/client/ref_count.go
index fd3f5a588b..fa67c8bbea 100644
--- a/src/aggregator/client/ref_count.go
+++ b/src/aggregator/client/ref_count.go
@@ -25,8 +25,8 @@ import "sync/atomic"
type destructorFn func()
type refCount struct {
- n int32
destructorFn destructorFn
+ n int32
}
func (rc *refCount) SetRefCount(n int) { atomic.StoreInt32(&rc.n, int32(n)) }
diff --git a/src/aggregator/client/tcp_client.go b/src/aggregator/client/tcp_client.go
index 725894049b..40a519457c 100644
--- a/src/aggregator/client/tcp_client.go
+++ b/src/aggregator/client/tcp_client.go
@@ -268,9 +268,10 @@ func (c *TCPClient) write(
return err
}
var (
- shardID = c.shardFn(metricID, uint32(placement.NumShards()))
- instances = placement.InstancesForShard(shardID)
- multiErr = xerrors.NewMultiError()
+ shardID = c.shardFn(metricID, uint32(placement.NumShards()))
+ instances = placement.InstancesForShard(shardID)
+ multiErr = xerrors.NewMultiError()
+ oneOrMoreSucceeded = false
)
for _, instance := range instances {
// NB(xichen): the shard should technically always be found because the instances
@@ -288,7 +289,15 @@ func (c *TCPClient) write(
}
if err = c.writerMgr.Write(instance, shardID, payload); err != nil {
multiErr = multiErr.Add(err)
+ continue
}
+
+ oneOrMoreSucceeded = true
+ }
+
+ if !oneOrMoreSucceeded {
+ // Unrectifiable loss: no instance accepted the write.
+ c.metrics.dropped.Inc(1)
}
onPlacementDoneFn()
@@ -329,6 +338,7 @@ type tcpClientMetrics struct {
flush tally.Counter
shardNotOwned tally.Counter
shardNotWriteable tally.Counter
+ dropped tally.Counter
}
func newTCPClientMetrics(
@@ -343,5 +353,6 @@ func newTCPClientMetrics(
flush: scope.Counter("flush"),
shardNotOwned: scope.Counter("shard-not-owned"),
shardNotWriteable: scope.Counter("shard-not-writeable"),
+ dropped: scope.Counter("dropped"),
}
}
diff --git a/src/aggregator/client/writer.go b/src/aggregator/client/writer.go
index 4db58ae8ea..dfc3c628a3 100644
--- a/src/aggregator/client/writer.go
+++ b/src/aggregator/client/writer.go
@@ -65,14 +65,14 @@ type writer struct {
log *zap.Logger
metrics writerMetrics
- flushSize int
- maxTimerBatchSize int
encoderOpts protobuf.UnaggregatedOptions
queue instanceQueue
+ flushSize int
+ maxTimerBatchSize int
- closed bool
encodersByShard map[uint32]*lockedEncoder
newLockedEncoderFn newLockedEncoderFn
+ closed bool
}
func newInstanceWriter(instance placement.Instance, opts Options) instanceWriter {
@@ -534,8 +534,8 @@ func newWriterMetrics(s tally.Scope) writerMetrics {
}
type lockedEncoder struct {
- sync.Mutex
protobuf.UnaggregatedEncoder
+ sync.Mutex
}
func newLockedEncoder(encoderOpts protobuf.UnaggregatedOptions) *lockedEncoder {
@@ -544,8 +544,8 @@ func newLockedEncoder(encoderOpts protobuf.UnaggregatedOptions) *lockedEncoder {
}
type refCountedWriter struct {
- refCount
instanceWriter
+ refCount
}
func newRefCountedWriter(instance placement.Instance, opts Options) *refCountedWriter {
diff --git a/src/cluster/client/etcd/client.go b/src/cluster/client/etcd/client.go
index 381b41935d..dc4f717328 100644
--- a/src/cluster/client/etcd/client.go
+++ b/src/cluster/client/etcd/client.go
@@ -182,12 +182,18 @@ func (c *csclient) newkvOptions(
cacheFileFn cacheFileForZoneFn,
) etcdkv.Options {
kvOpts := etcdkv.NewOptions().
- SetInstrumentsOptions(instrument.NewOptions().
+ SetInstrumentsOptions(c.opts.InstrumentOptions().
SetLogger(c.logger).
SetMetricsScope(c.kvScope)).
SetCacheFileFn(cacheFileFn(opts.Zone())).
SetWatchWithRevision(c.opts.WatchWithRevision()).
- SetNewDirectoryMode(c.opts.NewDirectoryMode())
+ SetNewDirectoryMode(c.opts.NewDirectoryMode()).
+ SetEnableFastGets(c.opts.EnableFastGets()).
+ SetRetryOptions(c.opts.RetryOptions()).
+ SetRequestTimeout(c.opts.RequestTimeout()).
+ SetWatchChanInitTimeout(c.opts.WatchChanInitTimeout()).
+ SetWatchChanCheckInterval(c.opts.WatchChanCheckInterval()).
+ SetWatchChanResetInterval(c.opts.WatchChanResetInterval())
if ns := opts.Namespace(); ns != "" {
kvOpts = kvOpts.SetPrefix(kvOpts.ApplyPrefix(ns))
diff --git a/src/cluster/client/etcd/config.go b/src/cluster/client/etcd/config.go
index 539ca48387..49232be416 100644
--- a/src/cluster/client/etcd/config.go
+++ b/src/cluster/client/etcd/config.go
@@ -24,9 +24,12 @@ import (
"os"
"time"
+ "github.com/uber-go/tally"
+
"github.com/m3db/m3/src/cluster/client"
"github.com/m3db/m3/src/cluster/services"
"github.com/m3db/m3/src/x/instrument"
+ "github.com/m3db/m3/src/x/retry"
)
// ClusterConfig is the config for a zoned etcd cluster.
@@ -98,6 +101,15 @@ type Configuration struct {
SDConfig services.Configuration `yaml:"m3sd"`
WatchWithRevision int64 `yaml:"watchWithRevision"`
NewDirectoryMode *os.FileMode `yaml:"newDirectoryMode"`
+
+ Retry retry.Configuration `yaml:"retry"`
+ RequestTimeout time.Duration `yaml:"requestTimeout"`
+ WatchChanInitTimeout time.Duration `yaml:"watchChanInitTimeout"`
+ WatchChanCheckInterval time.Duration `yaml:"watchChanCheckInterval"`
+ WatchChanResetInterval time.Duration `yaml:"watchChanResetInterval"`
+ // EnableFastGets trades consistency for latency and throughput using clientv3.WithSerializable()
+ // on etcd ops.
+ EnableFastGets bool `yaml:"enableFastGets"`
}
// NewClient creates a new config service client.
@@ -114,7 +126,25 @@ func (cfg Configuration) NewOptions() Options {
SetCacheDir(cfg.CacheDir).
SetClusters(cfg.etcdClusters()).
SetServicesOptions(cfg.SDConfig.NewOptions()).
- SetWatchWithRevision(cfg.WatchWithRevision)
+ SetWatchWithRevision(cfg.WatchWithRevision).
+ SetEnableFastGets(cfg.EnableFastGets).
+ SetRetryOptions(cfg.Retry.NewOptions(tally.NoopScope))
+
+ if cfg.RequestTimeout > 0 {
+ opts = opts.SetRequestTimeout(cfg.RequestTimeout)
+ }
+
+ if cfg.WatchChanInitTimeout > 0 {
+ opts = opts.SetWatchChanInitTimeout(cfg.WatchChanInitTimeout)
+ }
+
+ if cfg.WatchChanCheckInterval > 0 {
+ opts = opts.SetWatchChanCheckInterval(cfg.WatchChanCheckInterval)
+ }
+
+ if cfg.WatchChanResetInterval > 0 {
+ opts = opts.SetWatchChanResetInterval(cfg.WatchChanResetInterval)
+ }
if v := cfg.NewDirectoryMode; v != nil {
opts = opts.SetNewDirectoryMode(*v)
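The new `retry`, `requestTimeout`, `watchChanInitTimeout`, `watchChanCheckInterval`, `watchChanResetInterval`, and `enableFastGets` yaml keys above flow into the client options through the setters this diff adds. A minimal sketch of the equivalent programmatic configuration (the duration values are arbitrary examples; the defaults added in options.go below are 10s each):

```go
package main

import (
	"log"
	"time"

	etcdclient "github.com/m3db/m3/src/cluster/client/etcd"
)

func main() {
	opts := etcdclient.NewOptions().
		SetRequestTimeout(15 * time.Second).
		SetWatchChanInitTimeout(20 * time.Second).
		SetWatchChanCheckInterval(30 * time.Second).
		SetWatchChanResetInterval(30 * time.Second).
		SetEnableFastGets(true) // serializable, possibly stale gets
	if err := opts.Validate(); err != nil {
		log.Fatal(err)
	}
}
```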
diff --git a/src/cluster/client/etcd/options.go b/src/cluster/client/etcd/options.go
index 566d240e10..8d88985e52 100644
--- a/src/cluster/client/etcd/options.go
+++ b/src/cluster/client/etcd/options.go
@@ -44,6 +44,11 @@ const (
defaultKeepAlivePeriodMaxJitter = 10 * time.Second
defaultKeepAliveTimeout = 10 * time.Second
+ defaultRequestTimeout = 10 * time.Second
+ defaultWatchChanCheckInterval = 10 * time.Second
+ defaultWatchChanResetInterval = 10 * time.Second
+ defaultWatchChanInitTimeout = 10 * time.Second
+
defaultRetryInitialBackoff = 2 * time.Second
defaultRetryBackoffFactor = 2.0
defaultRetryMaxRetries = 3
@@ -171,8 +176,12 @@ func (o tlsOptions) Config() (*tls.Config, error) {
// NewOptions creates a set of Options.
func NewOptions() Options {
return options{
- sdOpts: services.NewOptions(),
- iopts: instrument.NewOptions(),
+ sdOpts: services.NewOptions(),
+ iopts: instrument.NewOptions(),
+ requestTimeout: defaultRequestTimeout,
+ watchChanInitTimeout: defaultWatchChanInitTimeout,
+ watchChanCheckInterval: defaultWatchChanCheckInterval,
+ watchChanResetInterval: defaultWatchChanResetInterval,
// NB(r): Set some default retry options so changes to retry
// option defaults don't change behavior of this client's retry options
retryOpts: retry.NewOptions().
@@ -186,16 +195,21 @@ func NewOptions() Options {
}
type options struct {
- env string
- zone string
- service string
- cacheDir string
- watchWithRevision int64
- sdOpts services.Options
- clusters map[string]Cluster
- iopts instrument.Options
- retryOpts retry.Options
- newDirectoryMode os.FileMode
+ requestTimeout time.Duration
+ env string
+ zone string
+ service string
+ cacheDir string
+ watchChanCheckInterval time.Duration
+ watchChanResetInterval time.Duration
+ watchChanInitTimeout time.Duration
+ watchWithRevision int64
+ enableFastGets bool
+ sdOpts services.Options
+ clusters map[string]Cluster
+ iopts instrument.Options
+ retryOpts retry.Options
+ newDirectoryMode os.FileMode
}
func (o options) Validate() error {
@@ -211,6 +225,22 @@ func (o options) Validate() error {
return errors.New("invalid options, no instrument options set")
}
+ if o.watchChanCheckInterval <= 0 {
+ return errors.New("invalid watch channel check interval")
+ }
+
+ if o.watchChanResetInterval <= 0 {
+ return errors.New("invalid watch reset interval")
+ }
+
+ if o.watchChanInitTimeout <= 0 {
+ return errors.New("invalid watch init interval")
+ }
+
+ if o.requestTimeout <= 0 {
+ return errors.New("invalid request timeout")
+ }
+
return nil
}
@@ -289,6 +319,17 @@ func (o options) SetInstrumentOptions(iopts instrument.Options) Options {
return o
}
+//nolint:gocritic
+func (o options) RequestTimeout() time.Duration {
+ return o.requestTimeout
+}
+
+//nolint:gocritic
+func (o options) SetRequestTimeout(t time.Duration) Options {
+ o.requestTimeout = t
+ return o
+}
+
func (o options) RetryOptions() retry.Options {
return o.retryOpts
}
@@ -298,6 +339,39 @@ func (o options) SetRetryOptions(retryOpts retry.Options) Options {
return o
}
+//nolint:gocritic
+func (o options) WatchChanCheckInterval() time.Duration {
+ return o.watchChanCheckInterval
+}
+
+//nolint:gocritic
+func (o options) SetWatchChanCheckInterval(t time.Duration) Options {
+ o.watchChanCheckInterval = t
+ return o
+}
+
+//nolint:gocritic
+func (o options) WatchChanResetInterval() time.Duration {
+ return o.watchChanResetInterval
+}
+
+//nolint:gocritic
+func (o options) SetWatchChanResetInterval(t time.Duration) Options {
+ o.watchChanResetInterval = t
+ return o
+}
+
+//nolint:gocritic
+func (o options) WatchChanInitTimeout() time.Duration {
+ return o.watchChanInitTimeout
+}
+
+//nolint:gocritic
+func (o options) SetWatchChanInitTimeout(t time.Duration) Options {
+ o.watchChanInitTimeout = t
+ return o
+}
+
func (o options) WatchWithRevision() int64 {
return o.watchWithRevision
}
@@ -316,6 +390,17 @@ func (o options) NewDirectoryMode() os.FileMode {
return o.newDirectoryMode
}
+//nolint:gocritic
+func (o options) EnableFastGets() bool {
+ return o.enableFastGets
+}
+
+//nolint:gocritic
+func (o options) SetEnableFastGets(enabled bool) Options {
+ o.enableFastGets = enabled
+ return o
+}
+
// NewCluster creates a Cluster.
func NewCluster() Cluster {
return cluster{
diff --git a/src/cluster/client/etcd/options_test.go b/src/cluster/client/etcd/options_test.go
index befd638960..235ea13d46 100644
--- a/src/cluster/client/etcd/options_test.go
+++ b/src/cluster/client/etcd/options_test.go
@@ -98,6 +98,7 @@ func TestTLSOptions(t *testing.T) {
assert.Equal(t, "key", aOpts.KeyPath())
assert.Equal(t, "ca", aOpts.CACrtPath())
}
+
func TestOptions(t *testing.T) {
opts := NewOptions()
assert.Equal(t, "", opts.Zone())
@@ -111,6 +112,17 @@ func TestOptions(t *testing.T) {
_, ok := opts.ClusterForZone("z")
assert.False(t, ok)
assert.NotNil(t, opts.InstrumentOptions())
+ assert.Equal(t, defaultRequestTimeout, opts.RequestTimeout())
+ assert.Equal(t, defaultWatchChanCheckInterval, opts.WatchChanCheckInterval())
+ assert.Equal(t, defaultWatchChanResetInterval, opts.WatchChanResetInterval())
+ assert.Equal(t, defaultWatchChanInitTimeout, opts.WatchChanInitTimeout())
+ assert.False(t, opts.EnableFastGets())
+ ropts := opts.RetryOptions()
+ assert.Equal(t, defaultRetryJitter, ropts.Jitter())
+ assert.Equal(t, defaultRetryInitialBackoff, ropts.InitialBackoff())
+ assert.Equal(t, defaultRetryBackoffFactor, ropts.BackoffFactor())
+ assert.Equal(t, defaultRetryMaxRetries, ropts.MaxRetries())
+ assert.Equal(t, defaultRetryMaxBackoff, ropts.MaxBackoff())
c1 := NewCluster().SetZone("z1")
c2 := NewCluster().SetZone("z2")
diff --git a/src/cluster/client/etcd/types.go b/src/cluster/client/etcd/types.go
index a387357890..151c5eccda 100644
--- a/src/cluster/client/etcd/types.go
+++ b/src/cluster/client/etcd/types.go
@@ -33,6 +33,11 @@ import (
// Options is the Options to create a config service client.
type Options interface {
+ // RequestTimeout is the timeout for etcd requests.
+ RequestTimeout() time.Duration
+ // SetRequestTimeout sets the RequestTimeout.
+ SetRequestTimeout(t time.Duration) Options
+
Env() string
SetEnv(e string) Options
@@ -58,9 +63,30 @@ type Options interface {
RetryOptions() retry.Options
SetRetryOptions(retryOpts retry.Options) Options
+ // WatchChanCheckInterval will be used to periodically check whether a watch chan
+ // is no longer subscribed to and should be closed.
+ WatchChanCheckInterval() time.Duration
+ // SetWatchChanCheckInterval sets the WatchChanCheckInterval.
+ SetWatchChanCheckInterval(t time.Duration) Options
+
+ // WatchChanResetInterval is the delay before resetting the etcd watch chan.
+ WatchChanResetInterval() time.Duration
+ // SetWatchChanResetInterval sets the WatchChanResetInterval.
+ SetWatchChanResetInterval(t time.Duration) Options
+
+ // WatchChanInitTimeout is the timeout for a watchChan initialization.
+ WatchChanInitTimeout() time.Duration
+ // SetWatchChanInitTimeout sets the WatchChanInitTimeout.
+ SetWatchChanInitTimeout(t time.Duration) Options
+
WatchWithRevision() int64
SetWatchWithRevision(rev int64) Options
+ // EnableFastGets returns whether to use clientv3.WithSerializable() option to speed up gets.
+ EnableFastGets() bool
+ // SetEnableFastGets sets clientv3.WithSerializable() to speed up gets, but can fetch stale data.
+ SetEnableFastGets(enabled bool) Options
+
SetNewDirectoryMode(fm os.FileMode) Options
NewDirectoryMode() os.FileMode
diff --git a/src/cluster/etcd/watchmanager/manager_test.go b/src/cluster/etcd/watchmanager/manager_test.go
index d3720132eb..3cd9e1646c 100644
--- a/src/cluster/etcd/watchmanager/manager_test.go
+++ b/src/cluster/etcd/watchmanager/manager_test.go
@@ -33,6 +33,8 @@ import (
"go.etcd.io/etcd/clientv3"
"go.etcd.io/etcd/integration"
"golang.org/x/net/context"
+
+ "github.com/m3db/m3/src/x/clock"
)
func TestWatchChan(t *testing.T) {
@@ -242,13 +244,9 @@ func TestWatchNoLeader(t *testing.T) {
require.NoError(t, err)
// give some time for watch to be updated
- for i := 0; i < 10; i++ {
- if atomic.LoadInt32(&updateCalled) == int32(2) {
- break
- }
- time.Sleep(watchInitAndRetryDelay)
- runtime.Gosched()
- }
+ require.True(t, clock.WaitUntil(func() bool {
+ return atomic.LoadInt32(&updateCalled) >= 2
+ }, 30*time.Second))
updates := atomic.LoadInt32(&updateCalled)
if updates < 2 {
@@ -295,9 +293,10 @@ func TestWatchCompactedRevision(t *testing.T) {
})
go wh.Watch("foo")
- time.Sleep(3 * wh.opts.WatchChanInitTimeout())
- assert.Equal(t, int32(3), atomic.LoadInt32(updateCalled))
+ require.True(t, clock.WaitUntil(func() bool {
+ return atomic.LoadInt32(updateCalled) == 3
+ }, 30*time.Second))
lastRead := atomic.LoadInt32(updateCalled)
ec.Put(context.Background(), "foo", "bar-11")
diff --git a/src/cluster/kv/etcd/options.go b/src/cluster/kv/etcd/options.go
index 7b2caa23f0..78700c5865 100644
--- a/src/cluster/kv/etcd/options.go
+++ b/src/cluster/kv/etcd/options.go
@@ -79,6 +79,11 @@ type Options interface {
// WatchWithRevision is the revision that watch requests will start from.
WatchWithRevision() int64
+ // EnableFastGets returns whether to use clientv3.WithSerializable() option to speed up gets.
+ EnableFastGets() bool
+ // SetEnableFastGets sets clientv3.WithSerializable() to speed up gets, but can fetch stale data.
+ SetEnableFastGets(enabled bool) Options
+
// SetWatchWithRevision sets the revision that watch requests will start
// from.
SetWatchWithRevision(rev int64) Options
@@ -111,6 +116,7 @@ type options struct {
watchChanResetInterval time.Duration
watchChanInitTimeout time.Duration
watchWithRevision int64
+ enableFastGets bool
cacheFileFn CacheFileFn
newDirectoryMode os.FileMode
}
@@ -141,6 +147,18 @@ func (o options) Validate() error {
return errors.New("invalid watch channel check interval")
}
+ if o.watchChanResetInterval <= 0 {
+ return errors.New("invalid watch reset interval")
+ }
+
+ if o.watchChanInitTimeout <= 0 {
+ return errors.New("invalid watch init interval")
+ }
+
+ if o.requestTimeout <= 0 {
+ return errors.New("invalid request timeout")
+ }
+
return nil
}
@@ -207,6 +225,17 @@ func (o options) SetWatchWithRevision(rev int64) Options {
return o
}
+//nolint:gocritic
+func (o options) EnableFastGets() bool {
+ return o.enableFastGets
+}
+
+//nolint:gocritic
+func (o options) SetEnableFastGets(enabled bool) Options {
+ o.enableFastGets = enabled
+ return o
+}
+
func (o options) CacheFileFn() CacheFileFn {
return o.cacheFileFn
}
diff --git a/src/cluster/kv/etcd/options_test.go b/src/cluster/kv/etcd/options_test.go
new file mode 100644
index 0000000000..60b3dabeb3
--- /dev/null
+++ b/src/cluster/kv/etcd/options_test.go
@@ -0,0 +1,45 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package etcd
+
+import (
+ "math"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestOptions(t *testing.T) {
+ opts := NewOptions()
+ assert.NoError(t, opts.Validate())
+ assert.Equal(t, defaultRequestTimeout, opts.RequestTimeout())
+ assert.Equal(t, defaultWatchChanCheckInterval, opts.WatchChanCheckInterval())
+ assert.Equal(t, defaultWatchChanResetInterval, opts.WatchChanResetInterval())
+ assert.Equal(t, defaultWatchChanInitTimeout, opts.WatchChanInitTimeout())
+ assert.False(t, opts.EnableFastGets())
+ ropts := opts.RetryOptions()
+ assert.True(t, ropts.Jitter())
+ assert.Equal(t, time.Second, ropts.InitialBackoff())
+ assert.EqualValues(t, 2, ropts.BackoffFactor())
+ assert.EqualValues(t, 5, ropts.MaxRetries())
+ assert.Equal(t, time.Duration(math.MaxInt64), ropts.MaxBackoff())
+}
diff --git a/src/cluster/kv/etcd/store.go b/src/cluster/kv/etcd/store.go
index efbd3643e6..cf0f6b4540 100644
--- a/src/cluster/kv/etcd/store.go
+++ b/src/cluster/kv/etcd/store.go
@@ -153,7 +153,11 @@ func (c *client) get(key string) (kv.Value, error) {
ctx, cancel := c.context()
defer cancel()
- r, err := c.kv.Get(ctx, key)
+ var opts []clientv3.OpOption
+ if c.opts.EnableFastGets() {
+ opts = append(opts, clientv3.WithSerializable())
+ }
+ r, err := c.kv.Get(ctx, key, opts...)
if err != nil {
c.m.etcdGetError.Inc(1)
cachedV, ok := c.getCache(key)
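For context: etcd reads are linearizable by default, which costs a quorum round-trip; `clientv3.WithSerializable()` lets the contacted member answer from its local state, trading possible staleness for latency and throughput, which is the trade the `EnableFastGets` option exposes. A minimal sketch of the raw client call (the helper name is illustrative):

```go
package kvexample

import (
	"context"

	"go.etcd.io/etcd/clientv3"
)

// fastGet issues a serializable (non-linearizable) read: the contacted
// member answers from its local state without a quorum round-trip, so
// the result may be slightly stale.
func fastGet(ctx context.Context, cli *clientv3.Client, key string) (*clientv3.GetResponse, error) {
	return cli.Get(ctx, key, clientv3.WithSerializable())
}
```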
diff --git a/src/cluster/kv/etcd/store_test.go b/src/cluster/kv/etcd/store_test.go
index bbba9f9966..70fb5e10a9 100644
--- a/src/cluster/kv/etcd/store_test.go
+++ b/src/cluster/kv/etcd/store_test.go
@@ -1141,7 +1141,33 @@ func TestWatchWithStartRevision(t *testing.T) {
verifyValue(t, w1.Get(), "bar-50", 50)
})
}
+}
+
+func TestSerializedGets(t *testing.T) {
+ ec, opts, closeFn := testStore(t)
+ defer closeFn()
+
+ opts = opts.SetEnableFastGets(true)
+ require.NoError(t, opts.Validate())
+
+ store, err := NewStore(ec, opts)
+ require.NoError(t, err)
+ v, err := store.Set("foo", genProto("bar"))
+ require.EqualValues(t, 1, v)
+ require.NoError(t, err)
+
+ val, err := store.Get("foo")
+ verifyValue(t, val, "bar", 1)
+ require.NoError(t, err)
+
+ v, err = store.Set("foo", genProto("42"))
+ require.EqualValues(t, 2, v)
+ require.NoError(t, err)
+
+ val, err = store.Get("foo")
+ verifyValue(t, val, "42", 2)
+ require.NoError(t, err)
}
func verifyValue(t *testing.T, v kv.Value, value string, version int) {
diff --git a/src/cmd/services/m3coordinator/downsample/downsampler.go b/src/cmd/services/m3coordinator/downsample/downsampler.go
index 9a635758cc..0e5fc69689 100644
--- a/src/cmd/services/m3coordinator/downsample/downsampler.go
+++ b/src/cmd/services/m3coordinator/downsample/downsampler.go
@@ -127,15 +127,16 @@ func defaultMetricsAppenderOptions(opts DownsamplerOptions, agg agg) metricsAppe
}
return metricsAppenderOptions{
- agg: agg.aggregator,
- clientRemote: agg.clientRemote,
- clockOpts: agg.clockOpts,
- tagEncoderPool: agg.pools.tagEncoderPool,
- matcher: agg.matcher,
- metricTagsIteratorPool: agg.pools.metricTagsIteratorPool,
- debugLogging: debugLogging,
- logger: logger,
- augmentM3Tags: agg.augmentM3Tags,
+ agg: agg.aggregator,
+ clientRemote: agg.clientRemote,
+ clockOpts: agg.clockOpts,
+ tagEncoderPool: agg.pools.tagEncoderPool,
+ matcher: agg.matcher,
+ metricTagsIteratorPool: agg.pools.metricTagsIteratorPool,
+ debugLogging: debugLogging,
+ logger: logger,
+ augmentM3Tags: agg.augmentM3Tags,
+ includeRollupsOnDefaultRuleFiltering: agg.includeRollupsOnDefaultRuleFiltering,
}
}
diff --git a/src/cmd/services/m3coordinator/downsample/downsampler_test.go b/src/cmd/services/m3coordinator/downsample/downsampler_test.go
index 25241157f3..72dc1aebb5 100644
--- a/src/cmd/services/m3coordinator/downsample/downsampler_test.go
+++ b/src/cmd/services/m3coordinator/downsample/downsampler_test.go
@@ -124,6 +124,103 @@ func TestDownsamplerAggregationWithAutoMappingRulesFromNamespacesWatcher(t *test
testDownsamplerAggregation(t, testDownsampler)
}
+func TestDownsamplerAggregationDownsamplesRawMetricWithRollupRule(t *testing.T) {
+ t.Parallel()
+
+ gaugeMetric := testGaugeMetric{
+ tags: map[string]string{
+ nameTag: "http_requests",
+ "app": "nginx_edge",
+ "status_code": "500",
+ "endpoint": "/foo/bar",
+ "not_rolled_up": "not_rolled_up_value",
+ },
+ timedSamples: []testGaugeMetricTimedSample{
+ {value: 42},
+ {value: 64, offset: 1 * time.Second},
+ },
+ }
+ res := 1 * time.Second
+ ret := 30 * 24 * time.Hour
+ testDownsampler := newTestDownsampler(t, testDownsamplerOptions{
+ rulesConfig: &RulesConfiguration{
+ RollupRules: []RollupRuleConfiguration{
+ {
+ Filter: fmt.Sprintf(
+ "%s:http_requests app:* status_code:* endpoint:*",
+ nameTag),
+ Transforms: []TransformConfiguration{
+ {
+ Transform: &TransformOperationConfiguration{
+ Type: transformation.PerSecond,
+ },
+ },
+ {
+ Rollup: &RollupOperationConfiguration{
+ MetricName: "http_requests_by_status_code",
+ GroupBy: []string{"app", "status_code", "endpoint"},
+ Aggregations: []aggregation.Type{aggregation.Sum},
+ },
+ },
+ },
+ StoragePolicies: []StoragePolicyConfiguration{
+ {
+ Resolution: res,
+ Retention: ret,
+ },
+ },
+ },
+ },
+ },
+ ingest: &testDownsamplerOptionsIngest{
+ gaugeMetrics: []testGaugeMetric{gaugeMetric},
+ },
+ expect: &testDownsamplerOptionsExpect{
+ writes: []testExpectedWrite{
+ // aggregated rollup metric
+ {
+ tags: map[string]string{
+ nameTag: "http_requests_by_status_code",
+ string(rollupTagName): string(rollupTagValue),
+ "app": "nginx_edge",
+ "status_code": "500",
+ "endpoint": "/foo/bar",
+ },
+ values: []expectedValue{{value: 22}},
+ attributes: &storagemetadata.Attributes{
+ MetricsType: storagemetadata.AggregatedMetricsType,
+ Resolution: res,
+ Retention: ret,
+ },
+ },
+ // raw aggregated metric
+ {
+ tags: gaugeMetric.tags,
+ values: []expectedValue{{value: 42}, {value: 64}},
+ },
+ },
+ },
+ })
+
+ // Set up auto-mapping rules.
+ require.False(t, testDownsampler.downsampler.Enabled())
+ origStagedMetadata := originalStagedMetadata(t, testDownsampler)
+ ctrl := xtest.NewController(t)
+ defer ctrl.Finish()
+ session := dbclient.NewMockSession(ctrl)
+ setAggregatedNamespaces(t, testDownsampler, session, m3.AggregatedClusterNamespaceDefinition{
+ NamespaceID: ident.StringID("1s:30d"),
+ Resolution: res,
+ Retention: ret,
+ Session: session,
+ })
+ waitForStagedMetadataUpdate(t, testDownsampler, origStagedMetadata)
+ require.True(t, testDownsampler.downsampler.Enabled())
+
+ // Test expected output
+ testDownsamplerAggregation(t, testDownsampler)
+}
+
func TestDownsamplerAggregationToggleEnabled(t *testing.T) {
t.Parallel()
diff --git a/src/cmd/services/m3coordinator/downsample/metrics_appender.go b/src/cmd/services/m3coordinator/downsample/metrics_appender.go
index d073e813ee..46e26c0a04 100644
--- a/src/cmd/services/m3coordinator/downsample/metrics_appender.go
+++ b/src/cmd/services/m3coordinator/downsample/metrics_appender.go
@@ -98,11 +98,12 @@ type metricsAppenderOptions struct {
agg aggregator.Aggregator
clientRemote client.Client
- defaultStagedMetadatasProtos []metricpb.StagedMetadatas
- matcher matcher.Matcher
- tagEncoderPool serialize.TagEncoderPool
- metricTagsIteratorPool serialize.MetricTagsIteratorPool
- augmentM3Tags bool
+ defaultStagedMetadatasProtos []metricpb.StagedMetadatas
+ matcher matcher.Matcher
+ tagEncoderPool serialize.TagEncoderPool
+ metricTagsIteratorPool serialize.MetricTagsIteratorPool
+ augmentM3Tags bool
+ includeRollupsOnDefaultRuleFiltering bool
clockOpts clock.Options
debugLogging bool
@@ -230,23 +231,37 @@ func (a *metricsAppender) SamplesAppender(opts SampleAppenderOptions) (SamplesAp
// name and tags (i.e. overwriting each other).
a.mappingRuleStoragePolicies = a.mappingRuleStoragePolicies[:0]
- mappingRuleStagedMetadatas := matchResult.ForExistingIDAt(nowNanos)
- if !mappingRuleStagedMetadatas.IsDefault() && len(mappingRuleStagedMetadatas) != 0 {
- a.debugLogMatch("downsampler applying matched mapping rule",
- debugLogMatchOptions{Meta: mappingRuleStagedMetadatas})
+ ruleStagedMetadatas := matchResult.ForExistingIDAt(nowNanos)
+ if !ruleStagedMetadatas.IsDefault() && len(ruleStagedMetadatas) != 0 {
+ a.debugLogMatch("downsampler applying matched rule",
+ debugLogMatchOptions{Meta: ruleStagedMetadatas})
- // Collect all the current active mapping rules
- for _, stagedMetadata := range mappingRuleStagedMetadatas {
+ // Collect storage policies for all the current active mapping rules.
+ // TODO: we should convert this to iterate over pointers
+ // nolint:gocritic
+ for _, stagedMetadata := range ruleStagedMetadatas {
for _, pipe := range stagedMetadata.Pipelines {
- for _, sp := range pipe.StoragePolicies {
- a.mappingRuleStoragePolicies =
- append(a.mappingRuleStoragePolicies, sp)
+ // Skip rollup rules unless configured otherwise.
+ // We only want to consider mapping rules here,
+ // as we still want to apply default mapping rules to
+ // metrics that are rolled up to ensure the underlying metric
+ // gets written to aggregated namespaces.
+ if a.includeRollupsOnDefaultRuleFiltering || pipe.IsMappingRule() {
+ for _, sp := range pipe.StoragePolicies {
+ a.mappingRuleStoragePolicies =
+ append(a.mappingRuleStoragePolicies, sp)
+ }
+ } else {
+ a.debugLogMatch(
+ "skipping rollup rule in populating active mapping rule policies",
+ debugLogMatchOptions{},
+ )
}
}
}
// Only sample if going to actually aggregate
- pipelines := mappingRuleStagedMetadatas[len(mappingRuleStagedMetadatas)-1]
+ pipelines := ruleStagedMetadatas[len(ruleStagedMetadatas)-1]
a.curr.Pipelines =
append(a.curr.Pipelines, pipelines.Pipelines...)
}
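For reviewers, a minimal standalone sketch of the filtering decision above, using hypothetical simplified types rather than the real aggregator structures: mapping-rule pipelines always contribute their storage policies, while rollup pipelines only do so when the new flag is set.

```go
package main

import "fmt"

// pipeline is a hypothetical stand-in for the real pipeline metadata type.
type pipeline struct {
	isMappingRule   bool
	storagePolicies []string
}

// collectPolicies mirrors the loop above: rollup pipelines contribute
// storage policies only when includeRollups is set.
func collectPolicies(pipes []pipeline, includeRollups bool) []string {
	var out []string
	for _, p := range pipes {
		if includeRollups || p.isMappingRule {
			out = append(out, p.storagePolicies...)
		}
	}
	return out
}

func main() {
	pipes := []pipeline{
		{isMappingRule: true, storagePolicies: []string{"10s:48h"}},
		{isMappingRule: false, storagePolicies: []string{"1s:30d"}}, // a rollup pipeline
	}
	fmt.Println(collectPolicies(pipes, false)) // [10s:48h] - rollup skipped
	fmt.Println(collectPolicies(pipes, true))  // [10s:48h 1s:30d]
}
```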
diff --git a/src/cmd/services/m3coordinator/downsample/options.go b/src/cmd/services/m3coordinator/downsample/options.go
index f55e1c4acb..65e09160f5 100644
--- a/src/cmd/services/m3coordinator/downsample/options.go
+++ b/src/cmd/services/m3coordinator/downsample/options.go
@@ -225,10 +225,11 @@ type agg struct {
aggregator aggregator.Aggregator
clientRemote client.Client
- clockOpts clock.Options
- matcher matcher.Matcher
- pools aggPools
- augmentM3Tags bool
+ clockOpts clock.Options
+ matcher matcher.Matcher
+ pools aggPools
+ augmentM3Tags bool
+ includeRollupsOnDefaultRuleFiltering bool
}
// Configuration configurates a downsampler.
@@ -263,9 +264,6 @@ type Configuration struct {
// EntryTTL determines how long an entry remains alive before it may be expired due to inactivity.
EntryTTL time.Duration `yaml:"entryTTL"`
- // DisableAutoMappingRules disables auto mapping rules.
- DisableAutoMappingRules bool `yaml:"disableAutoMappingRules"`
-
// AugmentM3Tags will augment the metric type to aggregated metrics
// to be used within the filter for rules. If enabled, for example,
// your filter can specify '__m3_type__:gauge' to filter by gauges.
@@ -273,6 +271,14 @@ type Configuration struct {
// Furthermore, the option is automatically enabled if static rules are
// used and any filter contain an __m3_type__ tag.
AugmentM3Tags bool `yaml:"augmentM3Tags"`
+
+	// IncludeRollupsOnDefaultRuleFiltering will include rollup rules
+	// when deciding whether the downsampler should ignore the default
+	// auto mapping rules based on the storage policies applied by a given rule.
+	// This is usually not what you want, as it means the raw metric
+	// being rolled up by your rule will not be forwarded to aggregated
+	// namespaces and will only be written to the unaggregated namespace.
+	IncludeRollupsOnDefaultRuleFiltering bool `yaml:"includeRollupsOnDefaultRuleFiltering"`
}
// MatcherConfiguration is the configuration for the rule matcher.
@@ -286,8 +292,8 @@ type MatcherConfiguration struct {
// MatcherCacheConfiguration is the configuration for the rule matcher cache.
type MatcherCacheConfiguration struct {
- // Capacity if non-zero will set the capacity of the rules matching cache.
- Capacity int `yaml:"capacity"`
+	// Capacity, if set, is the capacity of the rules matching cache.
+ Capacity *int `yaml:"capacity"`
}
// RulesConfiguration is a set of rules configuration to use for downsampling.
@@ -761,8 +767,8 @@ func (cfg Configuration) newAggregator(o DownsamplerOptions) (agg, error) {
}
matcherCacheCapacity := defaultMatcherCacheCapacity
- if v := cfg.Matcher.Cache.Capacity; v > 0 {
- matcherCacheCapacity = v
+ if v := cfg.Matcher.Cache.Capacity; v != nil {
+ matcherCacheCapacity = *v
}
matcher, err := o.newAggregatorMatcher(matcherOpts, matcherCacheCapacity)
@@ -791,10 +797,11 @@ func (cfg Configuration) newAggregator(o DownsamplerOptions) (agg, error) {
}
return agg{
- clientRemote: client,
- matcher: matcher,
- pools: pools,
- augmentM3Tags: augmentM3Tags,
+ clientRemote: client,
+ matcher: matcher,
+ pools: pools,
+ augmentM3Tags: augmentM3Tags,
+ includeRollupsOnDefaultRuleFiltering: cfg.IncludeRollupsOnDefaultRuleFiltering,
}, nil
}
@@ -956,10 +963,11 @@ func (cfg Configuration) newAggregator(o DownsamplerOptions) (agg, error) {
}
return agg{
- aggregator: aggregatorInstance,
- matcher: matcher,
- pools: pools,
- augmentM3Tags: augmentM3Tags,
+ aggregator: aggregatorInstance,
+ matcher: matcher,
+ pools: pools,
+ augmentM3Tags: augmentM3Tags,
+ includeRollupsOnDefaultRuleFiltering: cfg.IncludeRollupsOnDefaultRuleFiltering,
}, nil
}
@@ -1047,14 +1055,19 @@ func (o DownsamplerOptions) newAggregatorMatcher(
opts matcher.Options,
capacity int,
) (matcher.Matcher, error) {
- cacheOpts := cache.NewOptions().
- SetCapacity(capacity).
- SetClockOptions(opts.ClockOptions()).
- SetInstrumentOptions(opts.InstrumentOptions().
- SetMetricsScope(opts.InstrumentOptions().MetricsScope().SubScope("matcher-cache")))
-
- cache := cache.NewCache(cacheOpts)
- return matcher.NewMatcher(cache, opts)
+ var matcherCache cache.Cache
+ if capacity > 0 {
+ scope := opts.InstrumentOptions().MetricsScope().SubScope("matcher-cache")
+ instrumentOpts := opts.InstrumentOptions().
+ SetMetricsScope(scope)
+ cacheOpts := cache.NewOptions().
+ SetCapacity(capacity).
+ SetClockOptions(opts.ClockOptions()).
+ SetInstrumentOptions(instrumentOpts)
+ matcherCache = cache.NewCache(cacheOpts)
+ }
+
+ return matcher.NewMatcher(matcherCache, opts)
}
func (o DownsamplerOptions) newAggregatorPlacementManager(
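A runnable sketch of the new pointer semantics (the default constant value here is assumed for illustration): leaving `capacity` unset falls back to the default, while an explicit `0` — now expressible with `*int` — disables the matcher cache entirely, since `newAggregatorMatcher` only constructs a cache when capacity > 0.

```go
package main

import "fmt"

// Assumed default for illustration; the real constant lives in options.go.
const defaultMatcherCacheCapacity = 100000

// resolveCapacity mirrors the nil-check in newAggregator above.
func resolveCapacity(configured *int) int {
	if configured != nil {
		return *configured
	}
	return defaultMatcherCacheCapacity
}

func main() {
	zero := 0
	fmt.Println(resolveCapacity(nil))   // unset: fall back to the default
	fmt.Println(resolveCapacity(&zero)) // explicit 0: no cache is constructed
}
```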
diff --git a/src/cmd/services/m3coordinator/ingest/carbon/ingest.go b/src/cmd/services/m3coordinator/ingest/carbon/ingest.go
index 085bfd734d..42fa0637a8 100644
--- a/src/cmd/services/m3coordinator/ingest/carbon/ingest.go
+++ b/src/cmd/services/m3coordinator/ingest/carbon/ingest.go
@@ -18,6 +18,7 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
+// Package ingestcarbon implements a carbon ingester.
package ingestcarbon
import (
@@ -73,7 +74,7 @@ var (
type Options struct {
InstrumentOptions instrument.Options
WorkerPool xsync.PooledWorkerPool
- IngesterConfig *config.CarbonIngesterConfiguration
+ IngesterConfig config.CarbonIngesterConfiguration
}
// CarbonIngesterRules contains the carbon ingestion rules.
@@ -86,11 +87,9 @@ func (o *Options) Validate() error {
if o.InstrumentOptions == nil {
return errIOptsMustBeSet
}
-
if o.WorkerPool == nil {
return errWorkerPoolMustBeSet
}
-
return nil
}
@@ -126,14 +125,19 @@ func NewIngester(
}
})
+ scope := opts.InstrumentOptions.MetricsScope()
+ metrics, err := newCarbonIngesterMetrics(scope)
+ if err != nil {
+ return nil, err
+ }
+
ingester := &ingester{
downsamplerAndWriter: downsamplerAndWriter,
opts: opts,
logger: opts.InstrumentOptions.Logger(),
tagOpts: tagOpts,
- metrics: newCarbonIngesterMetrics(
- opts.InstrumentOptions.MetricsScope()),
- lineResourcesPool: resourcePool,
+ metrics: metrics,
+ lineResourcesPool: resourcePool,
}
// No need to retain watch as NamespaceWatcher.Close() will handle closing any watches
// generated by creating listeners.
@@ -152,7 +156,7 @@ type ingester struct {
lineResourcesPool pool.ObjectPool
sync.RWMutex
- rules []ruleAndRegex
+ rules []ruleAndMatcher
}
func (i *ingester) OnUpdate(clusterNamespaces m3.ClusterNamespaces) {
@@ -272,19 +276,22 @@ func (i *ingester) Handle(conn net.Conn) {
// Interfaces require a context be passed, but M3DB client already has timeouts
// built in and allocating a new context each time is expensive so we just pass
// the same context always and rely on M3DB client timeouts.
- ctx = context.Background()
- wg = sync.WaitGroup{}
- s = carbon.NewScanner(conn, i.opts.InstrumentOptions)
- logger = i.opts.InstrumentOptions.Logger()
+ ctx = context.Background()
+ wg = sync.WaitGroup{}
+ s = carbon.NewScanner(conn, i.opts.InstrumentOptions)
+ logger = i.opts.InstrumentOptions.Logger()
+ rewrite = &i.opts.IngesterConfig.Rewrite
)
logger.Debug("handling new carbon ingestion connection")
for s.Scan() {
+ received := time.Now()
name, timestamp, value := s.Metric()
resources := i.getLineResources()
+
// Copy name since scanner bytes are recycled.
- resources.name = append(resources.name[:0], name...)
+ resources.name = copyAndRewrite(resources.name, name, rewrite)
wg.Add(1)
i.opts.WorkerPool.Go(func() {
@@ -292,6 +299,19 @@ func (i *ingester) Handle(conn net.Conn) {
if ok {
i.metrics.success.Inc(1)
}
+
+ now := time.Now()
+
+			// Always record age regardless of success/failure since errors can
+			// sometimes be due to how old the metrics are, and not recording age
+			// would obscure visibility into how fresh/old the incoming metrics are.
+ age := now.Sub(timestamp)
+ i.metrics.ingestLatency.RecordDuration(age)
+
+ // Also record write latency (not relative to metric timestamp).
+ i.metrics.writeLatency.RecordDuration(now.Sub(received))
+
// The contract is that after the DownsamplerAndWriter returns, any resources
// that it needed to hold onto have already been copied.
i.putLineResources(resources)
@@ -351,7 +371,17 @@ func (i *ingester) write(
i.RUnlock()
for _, rule := range rules {
- if rule.rule.Pattern == graphite.MatchAllPattern || rule.regexp.Match(resources.name) {
+ var matches bool
+ switch {
+ case rule.rule.Pattern == graphite.MatchAllPattern:
+ matches = true
+ case rule.regexp != nil:
+ matches = rule.regexp.Match(resources.name)
+ case len(rule.contains) != 0:
+ matches = bytes.Contains(resources.name, rule.contains)
+ }
+
+ if matches {
// Each rule should only have either mapping rules or storage policies so
// one of these should be a no-op.
downsampleAndStoragePolicies.DownsampleMappingRules = rule.mappingRules
@@ -401,10 +431,8 @@ func (i *ingester) writeWithOptions(
return err
}
- err = i.downsamplerAndWriter.Write(
- ctx, tags, resources.datapoints, xtime.Second, nil, opts,
- )
-
+ err = i.downsamplerAndWriter.Write(ctx, tags, resources.datapoints,
+ xtime.Second, nil, opts)
if err != nil {
i.logger.Error("err writing carbon metric",
zap.String("name", string(resources.name)), zap.Error(err))
@@ -419,18 +447,26 @@ func (i *ingester) Close() {
// We don't maintain any state in-between connections so there is nothing to do here.
}
-func newCarbonIngesterMetrics(m tally.Scope) carbonIngesterMetrics {
- return carbonIngesterMetrics{
- success: m.Counter("success"),
- err: m.Counter("error"),
- malformed: m.Counter("malformed"),
- }
+type carbonIngesterMetrics struct {
+ success tally.Counter
+ err tally.Counter
+ malformed tally.Counter
+ ingestLatency tally.Histogram
+ writeLatency tally.Histogram
}
-type carbonIngesterMetrics struct {
- success tally.Counter
- err tally.Counter
- malformed tally.Counter
+func newCarbonIngesterMetrics(scope tally.Scope) (carbonIngesterMetrics, error) {
+ buckets, err := ingest.NewLatencyBuckets()
+ if err != nil {
+ return carbonIngesterMetrics{}, err
+ }
+ return carbonIngesterMetrics{
+ success: scope.Counter("success"),
+ err: scope.Counter("error"),
+ malformed: scope.Counter("malformed"),
+ writeLatency: scope.SubScope("write").Histogram("latency", buckets.WriteLatencyBuckets),
+ ingestLatency: scope.SubScope("ingest").Histogram("latency", buckets.IngestLatencyBuckets),
+ }, nil
}
// GenerateTagsFromName accepts a carbon metric name and blows it up into a list of
@@ -511,7 +547,7 @@ func generateTagsFromName(
return models.Tags{Opts: opts, Tags: tags}, nil
}
-// Compile all the carbon ingestion rules into regexp so that we can
+// Compile all the carbon ingestion rules into matchers so that we can
// perform matching. Also, generate all the mapping rules and storage
// policies that we will need to pass to the DownsamplerAndWriter upfront
// so that we don't need to create them each time.
@@ -519,12 +555,27 @@ func generateTagsFromName(
// Note that only one rule will be applied per metric and rules are applied
// such that the first one that matches takes precedence. As a result we need
// to make sure to maintain the order of the rules when we generate the compiled ones.
-func (i *ingester) compileRulesWithLock(rules CarbonIngesterRules) ([]ruleAndRegex, error) {
- var compiledRules []ruleAndRegex
+func (i *ingester) compileRulesWithLock(rules CarbonIngesterRules) ([]ruleAndMatcher, error) {
+ compiledRules := make([]ruleAndMatcher, 0, len(rules.Rules))
for _, rule := range rules.Rules {
- compiled, err := regexp.Compile(rule.Pattern)
- if err != nil {
- return nil, err
+ if rule.Pattern != "" && rule.Contains != "" {
+ return nil, fmt.Errorf(
+ "rule contains both pattern and contains: pattern=%s, contains=%s",
+ rule.Pattern, rule.Contains)
+ }
+
+ var (
+ contains []byte
+ compiled *regexp.Regexp
+ )
+ if rule.Contains != "" {
+ contains = []byte(rule.Contains)
+ } else {
+ var err error
+ compiled, err = regexp.Compile(rule.Pattern)
+ if err != nil {
+ return nil, err
+ }
}
storagePolicies := make([]policy.StoragePolicy, 0, len(rule.Policies))
@@ -534,9 +585,10 @@ func (i *ingester) compileRulesWithLock(rules CarbonIngesterRules) ([]ruleAndReg
storagePolicies = append(storagePolicies, storagePolicy)
}
- compiledRule := ruleAndRegex{
- rule: rule,
- regexp: compiled,
+ compiledRule := ruleAndMatcher{
+ rule: rule,
+ contains: contains,
+ regexp: compiled,
}
if rule.Aggregation.EnabledOrDefault() {
@@ -587,9 +639,10 @@ type lineResources struct {
tags []models.Tag
}
-type ruleAndRegex struct {
+type ruleAndMatcher struct {
rule config.CarbonIngesterRuleConfiguration
regexp *regexp.Regexp
+ contains []byte
mappingRules []downsample.AutoMappingRule
storagePolicies []policy.StoragePolicy
}
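A self-contained sketch of the three-way match introduced in `write` above, with simplified types (the match-all sentinel is assumed here; the real one is `graphite.MatchAllPattern`): a match-all pattern short-circuits, a compiled regexp is used when present, and otherwise a plain byte-substring check runs, avoiding regexp overhead for simple rules.

```go
package main

import (
	"bytes"
	"fmt"
	"regexp"
)

// Assumed sentinel for illustration.
const matchAllPattern = ".*"

// compiledRule is a simplified stand-in for ruleAndMatcher.
type compiledRule struct {
	pattern  string
	regexp   *regexp.Regexp
	contains []byte
}

// matches mirrors the switch in write: match-all, then regexp, then contains.
func (r compiledRule) matches(name []byte) bool {
	switch {
	case r.pattern == matchAllPattern:
		return true
	case r.regexp != nil:
		return r.regexp.Match(name)
	case len(r.contains) != 0:
		return bytes.Contains(name, r.contains)
	}
	return false
}

func main() {
	viaRegexp := compiledRule{pattern: ".*foo.*", regexp: regexp.MustCompile(".*foo.*")}
	viaContains := compiledRule{contains: []byte("foo")}
	name := []byte("metrics.foo.bar")
	fmt.Println(viaRegexp.matches(name), viaContains.matches(name)) // true true
}
```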
diff --git a/src/cmd/services/m3coordinator/ingest/carbon/ingest_test.go b/src/cmd/services/m3coordinator/ingest/carbon/ingest_test.go
index a57e507cb9..ff8d1542d9 100644
--- a/src/cmd/services/m3coordinator/ingest/carbon/ingest_test.go
+++ b/src/cmd/services/m3coordinator/ingest/carbon/ingest_test.go
@@ -92,14 +92,23 @@ var (
},
},
}
+)
+
+type testRulesOptions struct {
+ substring string
+ prefix string
+ suffix string
+}
- // Match match-regex1 twice with two patterns, and in one case with two policies
- // and in the second with one policy. In addition, also match match-regex2 with
- // a single pattern and policy.
- testRulesWithPatterns = CarbonIngesterRules{
+func testRules(opts testRulesOptions) CarbonIngesterRules {
+ // Match prefix + substring + "1" + suffix twice with two patterns, and
+ // in one case with two policies and in the second with one policy. In
+	// addition, also match prefix + substring + "2" + suffix with a single
+ // pattern and policy.
+ return CarbonIngesterRules{
Rules: []config.CarbonIngesterRuleConfiguration{
{
- Pattern: ".*match-regex1.*",
+ Pattern: opts.prefix + opts.substring + "1" + opts.suffix,
Aggregation: config.CarbonIngesterAggregationConfiguration{
Enabled: truePtr,
Type: aggregateMeanPtr,
@@ -117,7 +126,7 @@ var (
},
// Should never match as the previous one takes precedence.
{
- Pattern: ".*match-regex1.*",
+ Pattern: opts.prefix + opts.substring + "1" + opts.suffix,
Aggregation: config.CarbonIngesterAggregationConfiguration{
Enabled: truePtr,
Type: aggregateMeanPtr,
@@ -130,7 +139,7 @@ var (
},
},
{
- Pattern: ".*match-regex2.*",
+ Pattern: opts.prefix + opts.substring + "2" + opts.suffix,
Aggregation: config.CarbonIngesterAggregationConfiguration{
Enabled: truePtr,
Type: aggregateLastPtr,
@@ -143,7 +152,7 @@ var (
},
},
{
- Pattern: ".*match-regex3.*",
+ Pattern: opts.prefix + opts.substring + "3" + opts.suffix,
Aggregation: config.CarbonIngesterAggregationConfiguration{
Enabled: falsePtr,
},
@@ -156,10 +165,12 @@ var (
},
},
}
+}
- // Maps the patterns above to their expected write options.
- expectedWriteOptsByPattern = map[string]ingest.WriteOptions{
- "match-regex1": {
+func testExpectedWriteOptions(substring string) map[string]ingest.WriteOptions {
+ // Maps the rules above to their expected write options.
+ return map[string]ingest.WriteOptions{
+ substring + "1": {
DownsampleOverride: true,
DownsampleMappingRules: []downsample.AutoMappingRule{
{
@@ -172,7 +183,7 @@ var (
},
WriteOverride: true,
},
- "match-regex2": {
+ substring + "2": {
DownsampleOverride: true,
DownsampleMappingRules: []downsample.AutoMappingRule{
{
@@ -182,7 +193,7 @@ var (
},
WriteOverride: true,
},
- "match-regex3": {
+ substring + "3": {
DownsampleOverride: true,
WriteOverride: true,
WriteStoragePolicies: []policy.StoragePolicy{
@@ -190,7 +201,7 @@ var (
},
},
}
-)
+}
func TestIngesterHandleConn(t *testing.T) {
ctrl := gomock.NewController(t)
@@ -214,7 +225,10 @@ func TestIngesterHandleConn(t *testing.T) {
lock.Lock()
// Clone tags because they (and their underlying bytes) are pooled.
found = append(found, testMetric{
- tags: tags.Clone(), timestamp: int(dp[0].Timestamp.Unix()), value: dp[0].Value})
+ tags: tags.Clone(),
+ timestamp: int(dp[0].Timestamp.Unix()),
+ value: dp[0].Value,
+ })
// Make 1 in 10 writes fail to test those paths.
returnErr := idx%10 == 0
@@ -243,95 +257,156 @@ func TestIngesterHandleConn(t *testing.T) {
assertTestMetricsAreEqual(t, testMetrics, found)
}
-func TestIngesterHonorsPatterns(t *testing.T) {
- ctrl := gomock.NewController(t)
- mockDownsamplerAndWriter := ingest.NewMockDownsamplerAndWriter(ctrl)
-
- var (
- lock = sync.Mutex{}
- found = []testMetric{}
- )
- mockDownsamplerAndWriter.EXPECT().
- Write(gomock.Any(), gomock.Any(), gomock.Any(), xtime.Second, gomock.Any(), gomock.Any()).DoAndReturn(func(
- _ context.Context,
- tags models.Tags,
- dp ts.Datapoints,
- unit xtime.Unit,
- annotation []byte,
- writeOpts ingest.WriteOptions,
- ) interface{} {
- lock.Lock()
- // Clone tags because they (and their underlying bytes) are pooled.
- found = append(found, testMetric{
- tags: tags.Clone(), timestamp: int(dp[0].Timestamp.Unix()), value: dp[0].Value})
- lock.Unlock()
-
- // Use panic's instead of require/assert because those don't behave properly when the assertion
- // is run in a background goroutine. Also we match on the second tag val just due to the nature
- // of how the patterns were written.
- secondTagVal := string(tags.Tags[1].Value)
- expectedWriteOpts, ok := expectedWriteOptsByPattern[secondTagVal]
- if !ok {
- panic(fmt.Sprintf("expected write options for: %s", secondTagVal))
- }
-
- if !reflect.DeepEqual(expectedWriteOpts, writeOpts) {
- panic(fmt.Sprintf("expected %v to equal %v for metric: %s",
- expectedWriteOpts, writeOpts, secondTagVal))
- }
-
- return nil
- }).AnyTimes()
-
- packet := []byte("" +
- "foo.match-regex1.bar.baz 1 1\n" +
- "foo.match-regex2.bar.baz 2 2\n" +
- "foo.match-regex3.bar.baz 3 3\n" +
- "foo.match-not-regex.bar.baz 4 4")
- byteConn := &byteConn{b: bytes.NewBuffer(packet)}
-
- session := client.NewMockSession(ctrl)
- watcher := newTestWatcher(t, session, m3.AggregatedClusterNamespaceDefinition{
- NamespaceID: ident.StringID("10s:48h"),
- Resolution: 10 * time.Second,
- Retention: 48 * time.Hour,
- Session: session,
- }, m3.AggregatedClusterNamespaceDefinition{
- NamespaceID: ident.StringID("1m:24h"),
- Resolution: 1 * time.Minute,
- Retention: 24 * time.Hour,
- Session: session,
- }, m3.AggregatedClusterNamespaceDefinition{
- NamespaceID: ident.StringID("1h:168h"),
- Resolution: 1 * time.Hour,
- Retention: 168 * time.Hour,
- Session: session,
- })
-
- ingester, err := NewIngester(mockDownsamplerAndWriter, watcher, newTestOpts(testRulesWithPatterns))
- require.NoError(t, err)
- ingester.Handle(byteConn)
-
- assertTestMetricsAreEqual(t, []testMetric{
- {
- metric: []byte("foo.match-regex1.bar.baz"),
- tags: mustGenerateTagsFromName(t, []byte("foo.match-regex1.bar.baz")),
- timestamp: 1,
- value: 1,
- },
+func TestIngesterHonorsMatchers(t *testing.T) {
+ tests := []struct {
+ name string
+ input string
+ rules CarbonIngesterRules
+ expectedWriteOptions map[string]ingest.WriteOptions
+ expectedMetrics []testMetric
+ }{
{
- metric: []byte("foo.match-regex2.bar.baz"),
- tags: mustGenerateTagsFromName(t, []byte("foo.match-regex2.bar.baz")),
- timestamp: 2,
- value: 2,
+ name: "regexp matching",
+ input: "foo.match-regex1.bar.baz 1 1\n" +
+ "foo.match-regex2.bar.baz 2 2\n" +
+ "foo.match-regex3.bar.baz 3 3\n" +
+ "foo.match-not-regex.bar.baz 4 4",
+ rules: testRules(testRulesOptions{
+ substring: "match-regex",
+ prefix: ".*",
+ suffix: ".*",
+ }),
+ expectedWriteOptions: testExpectedWriteOptions("match-regex"),
+ expectedMetrics: []testMetric{
+ {
+ metric: []byte("foo.match-regex1.bar.baz"),
+ tags: mustGenerateTagsFromName(t, []byte("foo.match-regex1.bar.baz")),
+ timestamp: 1,
+ value: 1,
+ },
+ {
+ metric: []byte("foo.match-regex2.bar.baz"),
+ tags: mustGenerateTagsFromName(t, []byte("foo.match-regex2.bar.baz")),
+ timestamp: 2,
+ value: 2,
+ },
+ {
+ metric: []byte("foo.match-regex3.bar.baz"),
+ tags: mustGenerateTagsFromName(t, []byte("foo.match-regex3.bar.baz")),
+ timestamp: 3,
+ value: 3,
+ },
+ },
},
{
- metric: []byte("foo.match-regex3.bar.baz"),
- tags: mustGenerateTagsFromName(t, []byte("foo.match-regex3.bar.baz")),
- timestamp: 3,
- value: 3,
+ name: "contains matching",
+ input: "foo.match-contains1.bar.baz 1 1\n" +
+ "foo.match-contains2.bar.baz 2 2\n" +
+ "foo.match-contains3.bar.baz 3 3\n" +
+ "foo.match-not-contains.bar.baz 4 4",
+ rules: testRules(testRulesOptions{
+ substring: "match-contains",
+ prefix: ".*",
+ suffix: ".*",
+ }),
+ expectedWriteOptions: testExpectedWriteOptions("match-contains"),
+ expectedMetrics: []testMetric{
+ {
+ metric: []byte("foo.match-contains1.bar.baz"),
+ tags: mustGenerateTagsFromName(t, []byte("foo.match-contains1.bar.baz")),
+ timestamp: 1,
+ value: 1,
+ },
+ {
+ metric: []byte("foo.match-contains2.bar.baz"),
+ tags: mustGenerateTagsFromName(t, []byte("foo.match-contains2.bar.baz")),
+ timestamp: 2,
+ value: 2,
+ },
+ {
+ metric: []byte("foo.match-contains3.bar.baz"),
+ tags: mustGenerateTagsFromName(t, []byte("foo.match-contains3.bar.baz")),
+ timestamp: 3,
+ value: 3,
+ },
+ },
},
- }, found)
+ }
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ ctrl := gomock.NewController(t)
+ mockDownsamplerAndWriter := ingest.NewMockDownsamplerAndWriter(ctrl)
+
+ var (
+ lock = sync.Mutex{}
+ found = []testMetric{}
+ )
+ mockDownsamplerAndWriter.EXPECT().
+ Write(gomock.Any(), gomock.Any(), gomock.Any(), xtime.Second, gomock.Any(), gomock.Any()).
+ DoAndReturn(func(
+ _ context.Context,
+ tags models.Tags,
+ dp ts.Datapoints,
+ unit xtime.Unit,
+ annotation []byte,
+ writeOpts ingest.WriteOptions,
+ ) interface{} {
+ lock.Lock()
+ // Clone tags because they (and their underlying bytes) are pooled.
+ found = append(found, testMetric{
+ tags: tags.Clone(),
+ timestamp: int(dp[0].Timestamp.Unix()),
+ value: dp[0].Value,
+ })
+ lock.Unlock()
+
+					// Use panics instead of require/assert because those don't behave properly when the assertion
+ // is run in a background goroutine. Also we match on the second tag val just due to the nature
+ // of how the patterns were written.
+ secondTagVal := string(tags.Tags[1].Value)
+ expectedWriteOpts, ok := test.expectedWriteOptions[secondTagVal]
+ if !ok {
+ panic(fmt.Sprintf("expected write options for: %s", secondTagVal))
+ }
+
+ if !reflect.DeepEqual(expectedWriteOpts, writeOpts) {
+ panic(fmt.Sprintf("expected %v to equal %v for metric: %s",
+ expectedWriteOpts, writeOpts, secondTagVal))
+ }
+
+ return nil
+ }).
+ AnyTimes()
+
+ byteConn := &byteConn{b: bytes.NewBuffer([]byte(test.input))}
+
+ session := client.NewMockSession(ctrl)
+ watcher := newTestWatcher(t, session, m3.AggregatedClusterNamespaceDefinition{
+ NamespaceID: ident.StringID("10s:48h"),
+ Resolution: 10 * time.Second,
+ Retention: 48 * time.Hour,
+ Session: session,
+ }, m3.AggregatedClusterNamespaceDefinition{
+ NamespaceID: ident.StringID("1m:24h"),
+ Resolution: 1 * time.Minute,
+ Retention: 24 * time.Hour,
+ Session: session,
+ }, m3.AggregatedClusterNamespaceDefinition{
+ NamespaceID: ident.StringID("1h:168h"),
+ Resolution: 1 * time.Hour,
+ Retention: 168 * time.Hour,
+ Session: session,
+ })
+
+ ingester, err := NewIngester(mockDownsamplerAndWriter, watcher,
+ newTestOpts(test.rules))
+ require.NoError(t, err)
+ ingester.Handle(byteConn)
+
+ assertTestMetricsAreEqual(t, test.expectedMetrics, found)
+ })
+ }
}
func TestIngesterNoStaticRules(t *testing.T) {
@@ -370,7 +445,7 @@ func TestIngesterNoStaticRules(t *testing.T) {
require.True(t, ok)
// Wait until rules are updated and store them for later comparison.
- var origRules []ruleAndRegex
+ var origRules []ruleAndMatcher
require.True(t, clock.WaitUntil(func() bool {
downcast.RLock()
origRules = downcast.rules
@@ -466,7 +541,10 @@ func newMockDownsamplerAndWriter(
lock.Lock()
// Clone tags because they (and their underlying bytes) are pooled.
*found = append(*found, testMetric{
- tags: tags.Clone(), timestamp: int(dp[0].Timestamp.Unix()), value: dp[0].Value})
+ tags: tags.Clone(),
+ timestamp: int(dp[0].Timestamp.Unix()),
+ value: dp[0].Value,
+ })
// Make 1 in 10 writes fail to test those paths.
returnErr := idx%10 == 0
@@ -544,8 +622,7 @@ func TestGenerateTagsFromName(t *testing.T) {
func newTestOpts(rules CarbonIngesterRules) Options {
cfg := config.CarbonIngesterConfiguration{Rules: rules.Rules}
opts := testOptions
- opts.IngesterConfig = &cfg
-
+ opts.IngesterConfig = cfg
return opts
}
diff --git a/src/cmd/services/m3coordinator/ingest/carbon/rewrite.go b/src/cmd/services/m3coordinator/ingest/carbon/rewrite.go
new file mode 100644
index 0000000000..b5480b7650
--- /dev/null
+++ b/src/cmd/services/m3coordinator/ingest/carbon/rewrite.go
@@ -0,0 +1,88 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package ingestcarbon
+
+import (
+ "github.com/m3db/m3/src/cmd/services/m3query/config"
+)
+
+// nolint: gocyclo
+func copyAndRewrite(
+ dst, src []byte,
+ cfg *config.CarbonIngesterRewriteConfiguration,
+) []byte {
+ if cfg == nil || !cfg.Cleanup {
+ // No rewrite required.
+ return append(dst[:0], src...)
+ }
+
+ // Copy into dst as we rewrite.
+ dst = dst[:0]
+ leadingDots := true
+ numDots := 0
+ for _, c := range src {
+ if c == '.' {
+ numDots++
+ } else {
+ numDots = 0
+ leadingDots = false
+ }
+
+ if leadingDots {
+ // Currently processing leading dots.
+ continue
+ }
+
+ if numDots > 1 {
+ // Do not keep multiple dots.
+ continue
+ }
+
+ if !(c >= 'a' && c <= 'z') &&
+ !(c >= 'A' && c <= 'Z') &&
+ !(c >= '0' && c <= '9') &&
+ c != '.' &&
+ c != '-' &&
+ c != '_' &&
+ c != ':' &&
+ c != '#' {
+ // Invalid character, replace with underscore.
+ if n := len(dst); n > 0 && dst[n-1] == '_' {
+			// Preceding character is already an underscore; collapse the run.
+ continue
+ }
+ dst = append(dst, '_')
+ continue
+ }
+
+		// Valid character and not a leading or repeated dot.
+ dst = append(dst, c)
+ }
+ for i := len(dst) - 1; i >= 0; i-- {
+ if dst[i] != '.' {
+ // Found non dot.
+ break
+ }
+ // Remove trailing dot.
+ dst = dst[:i]
+ }
+ return dst
+}
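A brief worked example of `copyAndRewrite` with cleanup enabled (same package as above, illustrative input): leading, doubled, and trailing dots are normalized, and runs of invalid characters collapse to a single underscore.

```go
cfg := &config.CarbonIngesterRewriteConfiguration{Cleanup: true}
out := copyAndRewrite(nil, []byte("..foo..bar$$baz."), cfg)
// out == []byte("foo.bar_baz"):
// - the leading and trailing dots are dropped,
// - the double dot collapses to a single dot,
// - the "$$" run is replaced by a single underscore.
```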
diff --git a/src/cmd/services/m3coordinator/ingest/carbon/rewrite_test.go b/src/cmd/services/m3coordinator/ingest/carbon/rewrite_test.go
new file mode 100644
index 0000000000..baf12ec36c
--- /dev/null
+++ b/src/cmd/services/m3coordinator/ingest/carbon/rewrite_test.go
@@ -0,0 +1,124 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package ingestcarbon
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/m3db/m3/src/cmd/services/m3query/config"
+)
+
+func TestCopyAndRewrite(t *testing.T) {
+ tests := []struct {
+ name string
+ input string
+ expected string
+ cfg *config.CarbonIngesterRewriteConfiguration
+ }{
+ {
+ name: "bad but no rewrite",
+ input: "foo$$.bar%%.baz@@",
+ expected: "foo$$.bar%%.baz@@",
+ cfg: nil,
+ },
+ {
+ name: "bad but no rewrite cleanup",
+ input: "foo$$.bar%%.baz@@",
+ expected: "foo$$.bar%%.baz@@",
+ cfg: &config.CarbonIngesterRewriteConfiguration{
+ Cleanup: false,
+ },
+ },
+ {
+ name: "good with rewrite cleanup",
+ input: "foo.bar.baz",
+ expected: "foo.bar.baz",
+ cfg: &config.CarbonIngesterRewriteConfiguration{
+ Cleanup: true,
+ },
+ },
+ {
+ name: "bad with rewrite cleanup",
+ input: "foo$$.bar%%.baz@@",
+ expected: "foo_.bar_.baz_",
+ cfg: &config.CarbonIngesterRewriteConfiguration{
+ Cleanup: true,
+ },
+ },
+ {
+ name: "collapse two dots with rewrite cleanup",
+ input: "foo..bar.baz",
+ expected: "foo.bar.baz",
+ cfg: &config.CarbonIngesterRewriteConfiguration{
+ Cleanup: true,
+ },
+ },
+ {
+ name: "collapse three and two dots with rewrite cleanup",
+ input: "foo...bar..baz",
+ expected: "foo.bar.baz",
+ cfg: &config.CarbonIngesterRewriteConfiguration{
+ Cleanup: true,
+ },
+ },
+ {
+ name: "remove leading dot with rewrite cleanup",
+ input: ".foo.bar.baz",
+ expected: "foo.bar.baz",
+ cfg: &config.CarbonIngesterRewriteConfiguration{
+ Cleanup: true,
+ },
+ },
+ {
+ name: "remove multiple leading dots with rewrite cleanup",
+ input: "..foo.bar.baz",
+ expected: "foo.bar.baz",
+ cfg: &config.CarbonIngesterRewriteConfiguration{
+ Cleanup: true,
+ },
+ },
+ {
+ name: "remove trailing dot with rewrite cleanup",
+ input: "foo.bar.baz.",
+ expected: "foo.bar.baz",
+ cfg: &config.CarbonIngesterRewriteConfiguration{
+ Cleanup: true,
+ },
+ },
+ {
+ name: "remove multiple trailing dots with rewrite cleanup",
+ input: "foo.bar.baz..",
+ expected: "foo.bar.baz",
+ cfg: &config.CarbonIngesterRewriteConfiguration{
+ Cleanup: true,
+ },
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ actual := copyAndRewrite(nil, []byte(test.input), test.cfg)
+ require.Equal(t, test.expected, string(actual))
+ })
+ }
+}
diff --git a/src/cmd/services/m3coordinator/ingest/metrics.go b/src/cmd/services/m3coordinator/ingest/metrics.go
new file mode 100644
index 0000000000..ad514fc117
--- /dev/null
+++ b/src/cmd/services/m3coordinator/ingest/metrics.go
@@ -0,0 +1,90 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package ingest
+
+import (
+ "time"
+
+ "github.com/uber-go/tally"
+)
+
+// LatencyBuckets are histogram bucket sets for measuring write and ingest latencies.
+type LatencyBuckets struct {
+ WriteLatencyBuckets tally.DurationBuckets
+ IngestLatencyBuckets tally.DurationBuckets
+}
+
+// NewLatencyBuckets returns write and ingest latency buckets useful for
+// measuring ingest latency (i.e. time from datapoint/sample created to time
+// ingested) and write latency (i.e. time from receiving a sample from a
+// remote source to completion of that write locally).
+func NewLatencyBuckets() (LatencyBuckets, error) {
+ upTo1sBuckets, err := tally.LinearDurationBuckets(0, 100*time.Millisecond, 10)
+ if err != nil {
+ return LatencyBuckets{}, err
+ }
+
+ upTo10sBuckets, err := tally.LinearDurationBuckets(time.Second, 500*time.Millisecond, 18)
+ if err != nil {
+ return LatencyBuckets{}, err
+ }
+
+ upTo60sBuckets, err := tally.LinearDurationBuckets(10*time.Second, 5*time.Second, 11)
+ if err != nil {
+ return LatencyBuckets{}, err
+ }
+
+ upTo60mBuckets, err := tally.LinearDurationBuckets(0, 5*time.Minute, 12)
+ if err != nil {
+ return LatencyBuckets{}, err
+ }
+ upTo60mBuckets = upTo60mBuckets[1:] // Remove the first 0s to get 5 min aligned buckets
+
+ upTo6hBuckets, err := tally.LinearDurationBuckets(time.Hour, 30*time.Minute, 12)
+ if err != nil {
+ return LatencyBuckets{}, err
+ }
+
+ upTo24hBuckets, err := tally.LinearDurationBuckets(6*time.Hour, time.Hour, 19)
+ if err != nil {
+ return LatencyBuckets{}, err
+ }
+ upTo24hBuckets = upTo24hBuckets[1:] // Remove the first 6h to get 1 hour aligned buckets
+
+ var writeLatencyBuckets tally.DurationBuckets
+ writeLatencyBuckets = append(writeLatencyBuckets, upTo1sBuckets...)
+ writeLatencyBuckets = append(writeLatencyBuckets, upTo10sBuckets...)
+ writeLatencyBuckets = append(writeLatencyBuckets, upTo60sBuckets...)
+ writeLatencyBuckets = append(writeLatencyBuckets, upTo60mBuckets...)
+
+ var ingestLatencyBuckets tally.DurationBuckets
+ ingestLatencyBuckets = append(ingestLatencyBuckets, upTo1sBuckets...)
+ ingestLatencyBuckets = append(ingestLatencyBuckets, upTo10sBuckets...)
+ ingestLatencyBuckets = append(ingestLatencyBuckets, upTo60sBuckets...)
+ ingestLatencyBuckets = append(ingestLatencyBuckets, upTo60mBuckets...)
+ ingestLatencyBuckets = append(ingestLatencyBuckets, upTo6hBuckets...)
+ ingestLatencyBuckets = append(ingestLatencyBuckets, upTo24hBuckets...)
+
+ return LatencyBuckets{
+ WriteLatencyBuckets: writeLatencyBuckets,
+ IngestLatencyBuckets: ingestLatencyBuckets,
+ }, nil
+}
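A minimal usage sketch of the bucket helper with tally, mirroring the carbon ingester wiring earlier in this diff (the no-op scope stands in for a real metrics scope):

```go
package main

import (
	"time"

	"github.com/uber-go/tally"

	"github.com/m3db/m3/src/cmd/services/m3coordinator/ingest"
)

func main() {
	buckets, err := ingest.NewLatencyBuckets()
	if err != nil {
		panic(err)
	}
	// A no-op scope stands in for the real metrics scope.
	scope := tally.NoopScope
	h := scope.SubScope("ingest").Histogram("latency", buckets.IngestLatencyBuckets)
	h.RecordDuration(250 * time.Millisecond) // lands in the 200ms-300ms bucket
}
```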
diff --git a/src/cmd/services/m3coordinator/ingest/metrics_test.go b/src/cmd/services/m3coordinator/ingest/metrics_test.go
new file mode 100644
index 0000000000..c2f5543e42
--- /dev/null
+++ b/src/cmd/services/m3coordinator/ingest/metrics_test.go
@@ -0,0 +1,51 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package ingest
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestLatencyBuckets(t *testing.T) {
+ buckets, err := NewLatencyBuckets()
+ require.NoError(t, err)
+
+ // NB(r): Bucket length is tested just to sanity check how many buckets we are creating
+ require.Equal(t, 50, len(buckets.WriteLatencyBuckets.AsDurations()))
+
+ // NB(r): Bucket values are tested to sanity check they look right
+ // nolint: lll
+ expected := "[0s 100ms 200ms 300ms 400ms 500ms 600ms 700ms 800ms 900ms 1s 1.5s 2s 2.5s 3s 3.5s 4s 4.5s 5s 5.5s 6s 6.5s 7s 7.5s 8s 8.5s 9s 9.5s 10s 15s 20s 25s 30s 35s 40s 45s 50s 55s 1m0s 5m0s 10m0s 15m0s 20m0s 25m0s 30m0s 35m0s 40m0s 45m0s 50m0s 55m0s]"
+ actual := fmt.Sprintf("%v", buckets.WriteLatencyBuckets.AsDurations())
+ require.Equal(t, expected, actual)
+
+ // NB(r): Bucket length is tested just to sanity check how many buckets we are creating
+ require.Equal(t, 80, len(buckets.IngestLatencyBuckets.AsDurations()))
+
+ // NB(r): Bucket values are tested to sanity check they look right
+ // nolint: lll
+ expected = "[0s 100ms 200ms 300ms 400ms 500ms 600ms 700ms 800ms 900ms 1s 1.5s 2s 2.5s 3s 3.5s 4s 4.5s 5s 5.5s 6s 6.5s 7s 7.5s 8s 8.5s 9s 9.5s 10s 15s 20s 25s 30s 35s 40s 45s 50s 55s 1m0s 5m0s 10m0s 15m0s 20m0s 25m0s 30m0s 35m0s 40m0s 45m0s 50m0s 55m0s 1h0m0s 1h30m0s 2h0m0s 2h30m0s 3h0m0s 3h30m0s 4h0m0s 4h30m0s 5h0m0s 5h30m0s 6h0m0s 6h30m0s 7h0m0s 8h0m0s 9h0m0s 10h0m0s 11h0m0s 12h0m0s 13h0m0s 14h0m0s 15h0m0s 16h0m0s 17h0m0s 18h0m0s 19h0m0s 20h0m0s 21h0m0s 22h0m0s 23h0m0s 24h0m0s]"
+ actual = fmt.Sprintf("%v", buckets.IngestLatencyBuckets.AsDurations())
+ require.Equal(t, expected, actual)
+}
diff --git a/src/cmd/services/m3dbnode/config/config_test.go b/src/cmd/services/m3dbnode/config/config_test.go
index 8218580a33..02ca66b97e 100644
--- a/src/cmd/services/m3dbnode/config/config_test.go
+++ b/src/cmd/services/m3dbnode/config/config_test.go
@@ -287,7 +287,7 @@ db:
- 1.1.1.1:2379
- 1.1.1.2:2379
- 1.1.1.3:2379
-
+
seedNodes:
listenPeerUrls:
- http://0.0.0.0:2380
@@ -634,6 +634,18 @@ func TestConfiguration(t *testing.T) {
initTimeout: null
watchWithRevision: 0
newDirectoryMode: null
+ retry:
+ initialBackoff: 0s
+ backoffFactor: 0
+ maxBackoff: 0s
+ maxRetries: 0
+ forever: null
+ jitter: null
+ requestTimeout: 0s
+ watchChanInitTimeout: 0s
+ watchChanCheckInterval: 0s
+ watchChanResetInterval: 0s
+ enableFastGets: false
statics: []
seedNodes:
rootDir: /var/lib/etcd
diff --git a/src/cmd/services/m3query/config/config.go b/src/cmd/services/m3query/config/config.go
index 2b74e4e170..60a946b5e3 100644
--- a/src/cmd/services/m3query/config/config.go
+++ b/src/cmd/services/m3query/config/config.go
@@ -445,13 +445,28 @@ type CarbonConfiguration struct {
// RenderSeriesAllNaNs will render series that have only NaNs for entire
// output instead of returning an empty array of datapoints.
RenderSeriesAllNaNs bool `yaml:"renderSeriesAllNaNs"`
+ // CompileEscapeAllNotOnlyQuotes will escape all characters when using a backslash
+ // in a quoted string rather than just reserving for escaping quotes.
+ CompileEscapeAllNotOnlyQuotes bool `yaml:"compileEscapeAllNotOnlyQuotes"`
}
// CarbonIngesterConfiguration is the configuration struct for carbon ingestion.
type CarbonIngesterConfiguration struct {
- ListenAddress string `yaml:"listenAddress"`
- MaxConcurrency int `yaml:"maxConcurrency"`
- Rules []CarbonIngesterRuleConfiguration `yaml:"rules"`
+ ListenAddress string `yaml:"listenAddress"`
+ MaxConcurrency int `yaml:"maxConcurrency"`
+ Rewrite CarbonIngesterRewriteConfiguration `yaml:"rewrite"`
+ Rules []CarbonIngesterRuleConfiguration `yaml:"rules"`
+}
+
+// CarbonIngesterRewriteConfiguration is the configuration for rewriting
+// metrics at ingestion.
+type CarbonIngesterRewriteConfiguration struct {
+ // Cleanup will perform:
+ // - Trailing/leading dot elimination.
+ // - Double dot elimination.
+	// - Irregular char replacement with underscores (_), where irregular
+	//   is currently defined as anything not in [0-9a-zA-Z-_:#] and not a dot.
+ Cleanup bool `yaml:"cleanup"`
}
// LookbackDurationOrDefault validates the LookbackDuration
@@ -523,6 +538,7 @@ func (c *CarbonIngesterConfiguration) RulesOrDefault(namespaces m3.ClusterNamesp
// ingestion rule.
type CarbonIngesterRuleConfiguration struct {
Pattern string `yaml:"pattern"`
+ Contains string `yaml:"contains"`
Continue bool `yaml:"continue"`
Aggregation CarbonIngesterAggregationConfiguration `yaml:"aggregation"`
Policies []CarbonIngesterStoragePolicyConfiguration `yaml:"policies"`
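An illustrative pair of rules under the new field (hypothetical values; as enforced in `compileRulesWithLock`, a rule may set either `pattern` or `contains`, never both):

```go
rules := []config.CarbonIngesterRuleConfiguration{
	{Contains: "servers.cache"},     // cheap byte-substring match
	{Pattern: `^servers\.web\d+\.`}, // full regexp where needed
}
```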
diff --git a/src/cmd/tools/dtest/docker/harness/query_api_test.go b/src/cmd/tools/dtest/docker/harness/query_api_test.go
index 5ef751b2ec..9ec0907219 100644
--- a/src/cmd/tools/dtest/docker/harness/query_api_test.go
+++ b/src/cmd/tools/dtest/docker/harness/query_api_test.go
@@ -21,6 +21,7 @@
package harness
import (
+ "encoding/json"
"fmt"
"strings"
"testing"
@@ -145,6 +146,24 @@ func verifyResponse(expectedStatus int) resources.ResponseVerifier {
return fmt.Errorf("expected json content type, got %v", contentType)
}
+ errorResponse := struct {
+ Status string `json:"status,omitempty"`
+ Error string `json:"error,omitempty"`
+ }{}
+
+ err = json.Unmarshal([]byte(resp), &errorResponse)
+ if err != nil {
+ return fmt.Errorf("failed unmarshalling response: %w", err)
+ }
+
+ if errorResponse.Status != "error" {
+ return fmt.Errorf("expected body to contain status 'error', got %v", errorResponse.Status)
+ }
+
+ if errorResponse.Error == "" {
+ return fmt.Errorf("expected body to contain error message")
+ }
+
return nil
}
}
diff --git a/src/ctl/ui/yarn.lock b/src/ctl/ui/yarn.lock
index 7659e24833..d9ad0e8834 100644
--- a/src/ctl/ui/yarn.lock
+++ b/src/ctl/ui/yarn.lock
@@ -9400,9 +9400,9 @@ uri-js@^4.2.2:
punycode "^2.1.0"
urijs@^1.16.1:
- version "1.19.1"
- resolved "https://registry.yarnpkg.com/urijs/-/urijs-1.19.1.tgz#5b0ff530c0cbde8386f6342235ba5ca6e995d25a"
- integrity sha512-xVrGVi94ueCJNrBSTjWqjvtgvl3cyOTThp2zaMaFNGp3F542TR6sM3f2o8RqZl+AwteClSVmoCyt0ka4RjQOQg==
+ version "1.19.5"
+ resolved "https://registry.yarnpkg.com/urijs/-/urijs-1.19.5.tgz#119683ab4b2fb0bd637e5ea6dd9117bcac68d3e4"
+ integrity sha512-48z9VGWwdCV5KfizHsE05DWS5fhK6gFlx5MjO7xu0Krc5FGPWzjlXEVV0nPMrdVuP7xmMHiPZ2HoYZwKOFTZOg==
urix@^0.1.0:
version "0.1.0"
diff --git a/src/dbnode/client/client_mock.go b/src/dbnode/client/client_mock.go
index 6ce3b00bc1..a40a47d848 100644
--- a/src/dbnode/client/client_mock.go
+++ b/src/dbnode/client/client_mock.go
@@ -1085,6 +1085,21 @@ func (mr *MockAdminSessionMockRecorder) FetchBlocksFromPeers(namespace, shard, c
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchBlocksFromPeers", reflect.TypeOf((*MockAdminSession)(nil).FetchBlocksFromPeers), namespace, shard, consistencyLevel, metadatas, opts)
}
+// BorrowConnections mocks base method
+func (m *MockAdminSession) BorrowConnections(shardID uint32, fn WithBorrowConnectionFn, opts BorrowConnectionOptions) (BorrowConnectionsResult, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "BorrowConnections", shardID, fn, opts)
+ ret0, _ := ret[0].(BorrowConnectionsResult)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// BorrowConnections indicates an expected call of BorrowConnections
+func (mr *MockAdminSessionMockRecorder) BorrowConnections(shardID, fn, opts interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BorrowConnections", reflect.TypeOf((*MockAdminSession)(nil).BorrowConnections), shardID, fn, opts)
+}
+
// MockOptions is a mock of Options interface
type MockOptions struct {
ctrl *gomock.Controller
@@ -4812,6 +4827,21 @@ func (mr *MockclientSessionMockRecorder) FetchBlocksFromPeers(namespace, shard,
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchBlocksFromPeers", reflect.TypeOf((*MockclientSession)(nil).FetchBlocksFromPeers), namespace, shard, consistencyLevel, metadatas, opts)
}
+// BorrowConnections mocks base method
+func (m *MockclientSession) BorrowConnections(shardID uint32, fn WithBorrowConnectionFn, opts BorrowConnectionOptions) (BorrowConnectionsResult, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "BorrowConnections", shardID, fn, opts)
+ ret0, _ := ret[0].(BorrowConnectionsResult)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// BorrowConnections indicates an expected call of BorrowConnections
+func (mr *MockclientSessionMockRecorder) BorrowConnections(shardID, fn, opts interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BorrowConnections", reflect.TypeOf((*MockclientSession)(nil).BorrowConnections), shardID, fn, opts)
+}
+
// Open mocks base method
func (m *MockclientSession) Open() error {
m.ctrl.T.Helper()
@@ -4932,7 +4962,7 @@ func (mr *MockhostQueueMockRecorder) ConnectionPool() *gomock.Call {
}
// BorrowConnection mocks base method
-func (m *MockhostQueue) BorrowConnection(fn withConnectionFn) error {
+func (m *MockhostQueue) BorrowConnection(fn WithConnectionFn) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "BorrowConnection", fn)
ret0, _ := ret[0].(error)
@@ -5007,12 +5037,13 @@ func (mr *MockconnectionPoolMockRecorder) ConnectionCount() *gomock.Call {
}
// NextClient mocks base method
-func (m *MockconnectionPool) NextClient() (rpc.TChanNode, error) {
+func (m *MockconnectionPool) NextClient() (rpc.TChanNode, PooledChannel, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "NextClient")
ret0, _ := ret[0].(rpc.TChanNode)
- ret1, _ := ret[1].(error)
- return ret0, ret1
+ ret1, _ := ret[1].(PooledChannel)
+ ret2, _ := ret[2].(error)
+ return ret0, ret1, ret2
}
// NextClient indicates an expected call of NextClient
@@ -5057,7 +5088,7 @@ func (m *MockpeerSource) EXPECT() *MockpeerSourceMockRecorder {
}
// BorrowConnection mocks base method
-func (m *MockpeerSource) BorrowConnection(hostID string, fn withConnectionFn) error {
+func (m *MockpeerSource) BorrowConnection(hostID string, fn WithConnectionFn) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "BorrowConnection", hostID, fn)
ret0, _ := ret[0].(error)
@@ -5108,7 +5139,7 @@ func (mr *MockpeerMockRecorder) Host() *gomock.Call {
}
// BorrowConnection mocks base method
-func (m *Mockpeer) BorrowConnection(fn withConnectionFn) error {
+func (m *Mockpeer) BorrowConnection(fn WithConnectionFn) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "BorrowConnection", fn)
ret0, _ := ret[0].(error)
diff --git a/src/dbnode/client/connection_pool.go b/src/dbnode/client/connection_pool.go
index 4d1e1af1d2..effe4405b9 100644
--- a/src/dbnode/client/connection_pool.go
+++ b/src/dbnode/client/connection_pool.go
@@ -29,14 +29,14 @@ import (
"sync/atomic"
"time"
- "github.com/m3db/m3/src/dbnode/generated/thrift/rpc"
- "github.com/m3db/m3/src/dbnode/topology"
- xresource "github.com/m3db/m3/src/x/resource"
murmur3 "github.com/m3db/stackmurmur3/v2"
-
"github.com/uber-go/tally"
+ "github.com/uber/tchannel-go"
"github.com/uber/tchannel-go/thrift"
"go.uber.org/zap"
+
+ "github.com/m3db/m3/src/dbnode/generated/thrift/rpc"
+ "github.com/m3db/m3/src/dbnode/topology"
)
const (
@@ -67,15 +67,21 @@ type connPool struct {
healthStatus tally.Gauge
}
+// PooledChannel is a tchannel.Channel for a pooled connection.
+type PooledChannel interface {
+ GetSubChannel(serviceName string, opts ...tchannel.SubChannelOption) *tchannel.SubChannel
+ Close()
+}
+
type conn struct {
- channel xresource.SimpleCloser
+ channel PooledChannel
client rpc.TChanNode
}
// NewConnectionFn is a function that creates a connection.
type NewConnectionFn func(
channelName string, addr string, opts Options,
-) (xresource.SimpleCloser, rpc.TChanNode, error)
+) (PooledChannel, rpc.TChanNode, error)
type healthCheckFn func(client rpc.TChanNode, opts Options) error
@@ -134,20 +140,20 @@ func (p *connPool) ConnectionCount() int {
return int(poolLen)
}
-func (p *connPool) NextClient() (rpc.TChanNode, error) {
+func (p *connPool) NextClient() (rpc.TChanNode, PooledChannel, error) {
p.RLock()
if p.status != statusOpen {
p.RUnlock()
- return nil, errConnectionPoolClosed
+ return nil, nil, errConnectionPoolClosed
}
if p.poolLen < 1 {
p.RUnlock()
- return nil, errConnectionPoolHasNoConnections
+ return nil, nil, errConnectionPoolHasNoConnections
}
n := atomic.AddInt64(&p.used, 1)
conn := p.pool[n%p.poolLen]
p.RUnlock()
- return conn.client, nil
+ return conn.client, conn.channel, nil
}
func (p *connPool) Close() {
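A hedged sketch of a `NewConnectionFn` under the new signature (the `"Node"` service name and the generated `rpc.NewTChanNodeClient` constructor are assumptions based on M3's thrift codegen): a `*tchannel.Channel` already implements `GetSubChannel` and `Close`, so it satisfies `PooledChannel` directly and can be returned to `NextClient` callers.

```go
// Sketch only (same package): service name and generated constructor are
// assumptions, not confirmed by this diff.
func newConnection(
	channelName, addr string,
	opts Options,
) (PooledChannel, rpc.TChanNode, error) {
	// *tchannel.Channel satisfies PooledChannel directly.
	ch, err := tchannel.NewChannel(channelName, nil)
	if err != nil {
		return nil, nil, err
	}
	endpoint := &thrift.ClientOptions{HostPort: addr}
	client := rpc.NewTChanNodeClient(thrift.NewClient(ch, "Node", endpoint))
	return ch, client, nil
}
```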
diff --git a/src/dbnode/client/connection_pool_test.go b/src/dbnode/client/connection_pool_test.go
index f4d391180c..6c45038bdb 100644
--- a/src/dbnode/client/connection_pool_test.go
+++ b/src/dbnode/client/connection_pool_test.go
@@ -30,10 +30,10 @@ import (
"github.com/m3db/m3/src/dbnode/generated/thrift/rpc"
"github.com/m3db/m3/src/dbnode/topology"
xclock "github.com/m3db/m3/src/x/clock"
- xresource "github.com/m3db/m3/src/x/resource"
- "github.com/stretchr/testify/require"
"github.com/golang/mock/gomock"
+ "github.com/stretchr/testify/require"
+ "github.com/uber/tchannel-go"
)
const (
@@ -42,10 +42,19 @@ const (
)
var (
- h = topology.NewHost(testHostStr, testHostAddr)
- channelNone = &nullChannel{}
+ h = topology.NewHost(testHostStr, testHostAddr)
)
+type noopPooledChannel struct{}
+
+func (c *noopPooledChannel) Close() {}
+func (c *noopPooledChannel) GetSubChannel(
+ serviceName string,
+ opts ...tchannel.SubChannelOption,
+) *tchannel.SubChannel {
+ return nil
+}
+
func newConnectionPoolTestOptions() Options {
return newSessionTestOptions().
SetBackgroundConnectInterval(5 * time.Millisecond).
@@ -85,12 +94,12 @@ func TestConnectionPoolConnectsAndRetriesConnects(t *testing.T) {
fn := func(
ch string, addr string, opts Options,
- ) (xresource.SimpleCloser, rpc.TChanNode, error) {
+ ) (PooledChannel, rpc.TChanNode, error) {
attempt := int(atomic.AddInt32(&attempts, 1))
if attempt == 1 {
return nil, nil, fmt.Errorf("a connect error")
}
- return channelNone, nil, nil
+ return &noopPooledChannel{}, nil, nil
}
opts = opts.SetNewConnectionFn(fn)
@@ -151,7 +160,7 @@ func TestConnectionPoolConnectsAndRetriesConnects(t *testing.T) {
conns.Close()
doneWg.Done()
- nextClient, err := conns.NextClient()
+ nextClient, _, err := conns.NextClient()
require.Nil(t, nextClient)
require.Equal(t, errConnectionPoolClosed, err)
}
@@ -237,12 +246,12 @@ func TestConnectionPoolHealthChecks(t *testing.T) {
fn := func(
ch string, addr string, opts Options,
- ) (xresource.SimpleCloser, rpc.TChanNode, error) {
+ ) (PooledChannel, rpc.TChanNode, error) {
attempt := atomic.AddInt32(&newConnAttempt, 1)
if attempt == 1 {
- return channelNone, client1, nil
+ return &noopPooledChannel{}, client1, nil
} else if attempt == 2 {
- return channelNone, client2, nil
+ return &noopPooledChannel{}, client2, nil
}
return nil, nil, fmt.Errorf("spawning only 2 connections")
}
@@ -307,7 +316,7 @@ func TestConnectionPoolHealthChecks(t *testing.T) {
return conns.ConnectionCount() == 1
}, 5*time.Second)
for i := 0; i < 2; i++ {
- nextClient, err := conns.NextClient()
+ nextClient, _, err := conns.NextClient()
require.NoError(t, err)
require.Equal(t, client2, nextClient)
}
@@ -324,17 +333,13 @@ func TestConnectionPoolHealthChecks(t *testing.T) {
// and the connection actually being removed.
return conns.ConnectionCount() == 0
}, 5*time.Second)
- nextClient, err := conns.NextClient()
+ nextClient, _, err := conns.NextClient()
require.Nil(t, nextClient)
require.Equal(t, errConnectionPoolHasNoConnections, err)
conns.Close()
- nextClient, err = conns.NextClient()
+ nextClient, _, err = conns.NextClient()
require.Nil(t, nextClient)
require.Equal(t, errConnectionPoolClosed, err)
}
-
-type nullChannel struct{}
-
-func (*nullChannel) Close() {}
diff --git a/src/dbnode/client/host_queue.go b/src/dbnode/client/host_queue.go
index 32a42dbb94..9d042ed99d 100644
--- a/src/dbnode/client/host_queue.go
+++ b/src/dbnode/client/host_queue.go
@@ -531,7 +531,7 @@ func (q *queue) asyncTaggedWrite(
// NB(bl): host is passed to writeState to determine the state of the
// shard on the node we're writing to
- client, err := q.connPool.NextClient()
+ client, _, err := q.connPool.NextClient()
if err != nil {
// No client available
callAllCompletionFns(ops, q.host, err)
@@ -591,7 +591,7 @@ func (q *queue) asyncTaggedWriteV2(
// NB(bl): host is passed to writeState to determine the state of the
// shard on the node we're writing to.
- client, err := q.connPool.NextClient()
+ client, _, err := q.connPool.NextClient()
if err != nil {
// No client available
callAllCompletionFns(ops, q.host, err)
@@ -656,7 +656,7 @@ func (q *queue) asyncWrite(
// NB(bl): host is passed to writeState to determine the state of the
// shard on the node we're writing to
- client, err := q.connPool.NextClient()
+ client, _, err := q.connPool.NextClient()
if err != nil {
// No client available
callAllCompletionFns(ops, q.host, err)
@@ -715,7 +715,7 @@ func (q *queue) asyncWriteV2(
// NB(bl): host is passed to writeState to determine the state of the
// shard on the node we're writing to.
- client, err := q.connPool.NextClient()
+ client, _, err := q.connPool.NextClient()
if err != nil {
// No client available.
callAllCompletionFns(ops, q.host, err)
@@ -768,7 +768,7 @@ func (q *queue) asyncFetch(op *fetchBatchOp) {
q.Done()
}
- client, err := q.connPool.NextClient()
+ client, _, err := q.connPool.NextClient()
if err != nil {
// No client available
op.completeAll(nil, err)
@@ -821,7 +821,7 @@ func (q *queue) asyncFetchV2(
q.Done()
}
- client, err := q.connPool.NextClient()
+ client, _, err := q.connPool.NextClient()
if err != nil {
// No client available.
callAllCompletionFns(ops, nil, err)
@@ -868,7 +868,7 @@ func (q *queue) asyncFetchTagged(op *fetchTaggedOp) {
q.Done()
}
- client, err := q.connPool.NextClient()
+ client, _, err := q.connPool.NextClient()
if err != nil {
// No client available
op.CompletionFn()(fetchTaggedResultAccumulatorOpts{host: q.host}, err)
@@ -901,7 +901,7 @@ func (q *queue) asyncAggregate(op *aggregateOp) {
q.Done()
}
- client, err := q.connPool.NextClient()
+ client, _, err := q.connPool.NextClient()
if err != nil {
// No client available
op.CompletionFn()(aggregateResultAccumulatorOpts{host: q.host}, err)
@@ -931,7 +931,7 @@ func (q *queue) asyncTruncate(op *truncateOp) {
q.workerPool.Go(func() {
cleanup := q.Done
- client, err := q.connPool.NextClient()
+ client, _, err := q.connPool.NextClient()
if err != nil {
// No client available
op.completionFn(nil, err)
@@ -1003,7 +1003,7 @@ func (q *queue) ConnectionPool() connectionPool {
return q.connPool
}
-func (q *queue) BorrowConnection(fn withConnectionFn) error {
+func (q *queue) BorrowConnection(fn WithConnectionFn) error {
q.RLock()
if q.status != statusOpen {
q.RUnlock()
@@ -1014,12 +1014,12 @@ func (q *queue) BorrowConnection(fn withConnectionFn) error {
defer q.Done()
q.RUnlock()
- conn, err := q.connPool.NextClient()
+ conn, ch, err := q.connPool.NextClient()
if err != nil {
return err
}
- fn(conn)
+ fn(conn, ch)
return nil
}
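
Every call site above discards the new channel argument with `_`; as a hedged sketch from inside package client (q, the timeout, and the Health call are illustrative), a caller that wants both arguments would do:

// Borrow a connection from a host queue using the two-argument callback.
// The PooledChannel exposes the pooled TChannel resources; most call
// sites ignore it.
err := q.BorrowConnection(func(client rpc.TChanNode, ch PooledChannel) {
	tctx, _ := thrift.NewContext(5 * time.Second) // illustrative timeout
	_, _ = client.Health(tctx)                    // any TChanNode call
	_ = ch                                        // channel available if needed
})
if err != nil {
	// Queue closed or no connection was available.
}
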
diff --git a/src/dbnode/client/host_queue_aggregate_test.go b/src/dbnode/client/host_queue_aggregate_test.go
index 5b57f93d49..e1b204fdc7 100644
--- a/src/dbnode/client/host_queue_aggregate_test.go
+++ b/src/dbnode/client/host_queue_aggregate_test.go
@@ -36,7 +36,7 @@ import (
)
func TestHostQueueDrainOnCloseAggregate(t *testing.T) {
- ctrl := gomock.NewController(xtest.Reporter{t})
+ ctrl := gomock.NewController(xtest.Reporter{T: t})
defer ctrl.Finish()
mockConnPool := NewMockconnectionPool(ctrl)
@@ -73,7 +73,7 @@ func TestHostQueueDrainOnCloseAggregate(t *testing.T) {
assert.Equal(t, aggregate.request.NameSpace, req.NameSpace)
}
mockClient.EXPECT().AggregateRaw(gomock.Any(), gomock.Any()).Do(aggregateExec).Return(nil, nil)
- mockConnPool.EXPECT().NextClient().Return(mockClient, nil)
+ mockConnPool.EXPECT().NextClient().Return(mockClient, &noopPooledChannel{}, nil)
mockConnPool.EXPECT().Close().AnyTimes()
// Close the queue should cause all writes to be flushed
@@ -202,7 +202,7 @@ func testHostQueueAggregate(
// Prepare mocks for flush
mockClient := rpc.NewMockTChanNode(ctrl)
if testOpts != nil && testOpts.nextClientErr != nil {
- mockConnPool.EXPECT().NextClient().Return(nil, testOpts.nextClientErr)
+ mockConnPool.EXPECT().NextClient().Return(nil, nil, testOpts.nextClientErr)
} else if testOpts != nil && testOpts.aggregateErr != nil {
aggregateExec := func(ctx thrift.Context, req *rpc.AggregateQueryRawRequest) {
require.NotNil(t, req)
@@ -213,7 +213,7 @@ func testHostQueueAggregate(
Do(aggregateExec).
Return(nil, testOpts.aggregateErr)
- mockConnPool.EXPECT().NextClient().Return(mockClient, nil)
+ mockConnPool.EXPECT().NextClient().Return(mockClient, &noopPooledChannel{}, nil)
} else {
aggregateExec := func(ctx thrift.Context, req *rpc.AggregateQueryRawRequest) {
require.NotNil(t, req)
@@ -224,7 +224,7 @@ func testHostQueueAggregate(
Do(aggregateExec).
Return(result, nil)
- mockConnPool.EXPECT().NextClient().Return(mockClient, nil)
+ mockConnPool.EXPECT().NextClient().Return(mockClient, &noopPooledChannel{}, nil)
}
// Fetch
diff --git a/src/dbnode/client/host_queue_fetch_batch_test.go b/src/dbnode/client/host_queue_fetch_batch_test.go
index a485afaf46..ef3138b527 100644
--- a/src/dbnode/client/host_queue_fetch_batch_test.go
+++ b/src/dbnode/client/host_queue_fetch_batch_test.go
@@ -123,7 +123,7 @@ func TestHostQueueFetchBatchesV2MultiNS(t *testing.T) {
Do(verifyFetchBatchRawV2).
Return(result, nil)
- mockConnPool.EXPECT().NextClient().Return(mockClient, nil)
+ mockConnPool.EXPECT().NextClient().Return(mockClient, &noopPooledChannel{}, nil)
for _, fetchBatch := range fetchBatches {
assert.NoError(t, queue.Enqueue(fetchBatch))
@@ -310,7 +310,7 @@ func testHostQueueFetchBatches(
}
}
if testOpts != nil && testOpts.nextClientErr != nil {
- mockConnPool.EXPECT().NextClient().Return(nil, testOpts.nextClientErr)
+ mockConnPool.EXPECT().NextClient().Return(nil, nil, testOpts.nextClientErr)
} else if testOpts != nil && testOpts.fetchRawBatchErr != nil {
if opts.UseV2BatchAPIs() {
mockClient.EXPECT().
@@ -326,7 +326,7 @@ func testHostQueueFetchBatches(
Do(fetchBatchRaw).
Return(nil, testOpts.fetchRawBatchErr)
}
- mockConnPool.EXPECT().NextClient().Return(mockClient, nil)
+ mockConnPool.EXPECT().NextClient().Return(mockClient, &noopPooledChannel{}, nil)
} else {
if opts.UseV2BatchAPIs() {
mockClient.EXPECT().
@@ -343,7 +343,7 @@ func testHostQueueFetchBatches(
Return(result, nil)
}
- mockConnPool.EXPECT().NextClient().Return(mockClient, nil)
+ mockConnPool.EXPECT().NextClient().Return(mockClient, &noopPooledChannel{}, nil)
}
// Fetch
diff --git a/src/dbnode/client/host_queue_fetch_tagged_test.go b/src/dbnode/client/host_queue_fetch_tagged_test.go
index e48e83092c..df66a2815b 100644
--- a/src/dbnode/client/host_queue_fetch_tagged_test.go
+++ b/src/dbnode/client/host_queue_fetch_tagged_test.go
@@ -72,7 +72,7 @@ func TestHostQueueDrainOnCloseFetchTagged(t *testing.T) {
assert.Equal(t, fetch.request.NameSpace, req.NameSpace)
}
mockClient.EXPECT().FetchTagged(gomock.Any(), gomock.Any()).Do(fetchTagged).Return(nil, nil)
- mockConnPool.EXPECT().NextClient().Return(mockClient, nil)
+ mockConnPool.EXPECT().NextClient().Return(mockClient, &noopPooledChannel{}, nil)
mockConnPool.EXPECT().Close().AnyTimes()
// Close the queue should cause all writes to be flushed
@@ -202,7 +202,7 @@ func testHostQueueFetchTagged(
// Prepare mocks for flush
mockClient := rpc.NewMockTChanNode(ctrl)
if testOpts != nil && testOpts.nextClientErr != nil {
- mockConnPool.EXPECT().NextClient().Return(nil, testOpts.nextClientErr)
+ mockConnPool.EXPECT().NextClient().Return(nil, nil, testOpts.nextClientErr)
} else if testOpts != nil && testOpts.fetchTaggedErr != nil {
fetchTaggedExec := func(ctx thrift.Context, req *rpc.FetchTaggedRequest) {
require.NotNil(t, req)
@@ -213,7 +213,7 @@ func testHostQueueFetchTagged(
Do(fetchTaggedExec).
Return(nil, testOpts.fetchTaggedErr)
- mockConnPool.EXPECT().NextClient().Return(mockClient, nil)
+ mockConnPool.EXPECT().NextClient().Return(mockClient, &noopPooledChannel{}, nil)
} else {
fetchTaggedExec := func(ctx thrift.Context, req *rpc.FetchTaggedRequest) {
require.NotNil(t, req)
@@ -224,7 +224,7 @@ func testHostQueueFetchTagged(
Do(fetchTaggedExec).
Return(result, nil)
- mockConnPool.EXPECT().NextClient().Return(mockClient, nil)
+ mockConnPool.EXPECT().NextClient().Return(mockClient, &noopPooledChannel{}, nil)
}
// Fetch
diff --git a/src/dbnode/client/host_queue_write_batch_test.go b/src/dbnode/client/host_queue_write_batch_test.go
index 0bcab39622..23ae55db34 100644
--- a/src/dbnode/client/host_queue_write_batch_test.go
+++ b/src/dbnode/client/host_queue_write_batch_test.go
@@ -115,7 +115,7 @@ func TestHostQueueWriteBatches(t *testing.T) {
mockClient.EXPECT().WriteBatchRaw(gomock.Any(), gomock.Any()).Do(writeBatch).Return(nil)
}
- mockConnPool.EXPECT().NextClient().Return(mockClient, nil)
+ mockConnPool.EXPECT().NextClient().Return(mockClient, &noopPooledChannel{}, nil)
// Final write will flush
assert.NoError(t, queue.Enqueue(writes[3]))
@@ -203,7 +203,7 @@ func TestHostQueueWriteBatchesDifferentNamespaces(t *testing.T) {
// Assert the writes will be handled in two batches
mockClient.EXPECT().WriteBatchRawV2(gomock.Any(), gomock.Any()).Do(writeBatch).Return(nil).Times(1)
- mockConnPool.EXPECT().NextClient().Return(mockClient, nil).Times(1)
+ mockConnPool.EXPECT().NextClient().Return(mockClient, &noopPooledChannel{}, nil).Times(1)
} else {
writeBatch := func(ctx thrift.Context, req *rpc.WriteBatchRawRequest) {
var writesForNamespace []*writeOperation
@@ -221,7 +221,7 @@ func TestHostQueueWriteBatchesDifferentNamespaces(t *testing.T) {
// Assert the writes will be handled in two batches
mockClient.EXPECT().WriteBatchRaw(gomock.Any(), gomock.Any()).Do(writeBatch).Return(nil).Times(2)
- mockConnPool.EXPECT().NextClient().Return(mockClient, nil).Times(2)
+ mockConnPool.EXPECT().NextClient().Return(mockClient, &noopPooledChannel{}, nil).Times(2)
}
for _, write := range writes {
@@ -267,7 +267,7 @@ func TestHostQueueWriteBatchesNoClientAvailable(t *testing.T) {
// Prepare mocks for flush
nextClientErr := fmt.Errorf("an error")
- mockConnPool.EXPECT().NextClient().Return(nil, nextClientErr)
+ mockConnPool.EXPECT().NextClient().Return(nil, nil, nextClientErr)
// Write
var wg sync.WaitGroup
@@ -357,7 +357,7 @@ func TestHostQueueWriteBatchesPartialBatchErrs(t *testing.T) {
}
mockClient.EXPECT().WriteBatchRaw(gomock.Any(), gomock.Any()).Do(writeBatch).Return(batchErrs)
}
- mockConnPool.EXPECT().NextClient().Return(mockClient, nil)
+ mockConnPool.EXPECT().NextClient().Return(mockClient, &noopPooledChannel{}, nil)
// Perform writes
for _, write := range writes {
@@ -418,7 +418,7 @@ func TestHostQueueWriteBatchesEntireBatchErr(t *testing.T) {
}
}
mockClient.EXPECT().WriteBatchRaw(gomock.Any(), gomock.Any()).Do(writeBatch).Return(writeErr)
- mockConnPool.EXPECT().NextClient().Return(mockClient, nil)
+ mockConnPool.EXPECT().NextClient().Return(mockClient, &noopPooledChannel{}, nil)
// Perform writes
for _, write := range writes {
@@ -488,7 +488,7 @@ func TestHostQueueDrainOnClose(t *testing.T) {
}
mockClient.EXPECT().WriteBatchRaw(gomock.Any(), gomock.Any()).Do(writeBatch).Return(nil)
- mockConnPool.EXPECT().NextClient().Return(mockClient, nil)
+ mockConnPool.EXPECT().NextClient().Return(mockClient, &noopPooledChannel{}, nil)
mockConnPool.EXPECT().Close().AnyTimes()
diff --git a/src/dbnode/client/host_queue_write_tagged_test.go b/src/dbnode/client/host_queue_write_tagged_test.go
index 46f8ae5d03..c99f3f05f4 100644
--- a/src/dbnode/client/host_queue_write_tagged_test.go
+++ b/src/dbnode/client/host_queue_write_tagged_test.go
@@ -129,7 +129,7 @@ func TestHostQueueWriteTaggedBatches(t *testing.T) {
}
mockClient.EXPECT().WriteTaggedBatchRaw(gomock.Any(), gomock.Any()).Do(writeBatch).Return(nil)
}
- mockConnPool.EXPECT().NextClient().Return(mockClient, nil)
+ mockConnPool.EXPECT().NextClient().Return(mockClient, &noopPooledChannel{}, nil)
// Final write will flush
assert.NoError(t, queue.Enqueue(writes[3]))
@@ -225,7 +225,7 @@ func TestHostQueueWriteTaggedBatchesDifferentNamespaces(t *testing.T) {
}
// Assert the writes will be handled in two batches.
mockClient.EXPECT().WriteTaggedBatchRawV2(gomock.Any(), gomock.Any()).Do(writeBatch).Return(nil).Times(1)
- mockConnPool.EXPECT().NextClient().Return(mockClient, nil).Times(1)
+ mockConnPool.EXPECT().NextClient().Return(mockClient, &noopPooledChannel{}, nil).Times(1)
} else {
writeBatch := func(ctx thrift.Context, req *rpc.WriteTaggedBatchRawRequest) {
var writesForNamespace []*writeTaggedOperation
@@ -243,7 +243,7 @@ func TestHostQueueWriteTaggedBatchesDifferentNamespaces(t *testing.T) {
}
// Assert the writes will be handled in two batches.
mockClient.EXPECT().WriteTaggedBatchRaw(gomock.Any(), gomock.Any()).Do(writeBatch).Return(nil).Times(2)
- mockConnPool.EXPECT().NextClient().Return(mockClient, nil).Times(2)
+ mockConnPool.EXPECT().NextClient().Return(mockClient, &noopPooledChannel{}, nil).Times(2)
}
for _, write := range writes {
assert.NoError(t, queue.Enqueue(write))
@@ -288,7 +288,7 @@ func TestHostQueueWriteTaggedBatchesNoClientAvailable(t *testing.T) {
// Prepare mocks for flush
nextClientErr := fmt.Errorf("an error")
- mockConnPool.EXPECT().NextClient().Return(nil, nextClientErr)
+ mockConnPool.EXPECT().NextClient().Return(nil, nil, nextClientErr)
// Write
var wg sync.WaitGroup
@@ -368,7 +368,7 @@ func TestHostQueueWriteTaggedBatchesPartialBatchErrs(t *testing.T) {
}},
}}
mockClient.EXPECT().WriteTaggedBatchRaw(gomock.Any(), gomock.Any()).Do(writeBatch).Return(batchErrs)
- mockConnPool.EXPECT().NextClient().Return(mockClient, nil)
+ mockConnPool.EXPECT().NextClient().Return(mockClient, &noopPooledChannel{}, nil)
// Perform writes
for _, write := range writes {
@@ -428,7 +428,7 @@ func TestHostQueueWriteTaggedBatchesEntireBatchErr(t *testing.T) {
}
}
mockClient.EXPECT().WriteTaggedBatchRaw(gomock.Any(), gomock.Any()).Do(writeBatch).Return(writeErr)
- mockConnPool.EXPECT().NextClient().Return(mockClient, nil)
+ mockConnPool.EXPECT().NextClient().Return(mockClient, &noopPooledChannel{}, nil)
// Perform writes
for _, write := range writes {
@@ -499,7 +499,7 @@ func TestHostQueueDrainOnCloseTaggedWrite(t *testing.T) {
}
mockClient.EXPECT().WriteTaggedBatchRaw(gomock.Any(), gomock.Any()).Do(writeBatch).Return(nil)
- mockConnPool.EXPECT().NextClient().Return(mockClient, nil)
+ mockConnPool.EXPECT().NextClient().Return(mockClient, &noopPooledChannel{}, nil)
mockConnPool.EXPECT().Close().AnyTimes()
diff --git a/src/dbnode/client/options.go b/src/dbnode/client/options.go
index 9ec1613fd8..5cb4ea3eb1 100644
--- a/src/dbnode/client/options.go
+++ b/src/dbnode/client/options.go
@@ -42,13 +42,12 @@ import (
"github.com/m3db/m3/src/x/ident"
"github.com/m3db/m3/src/x/instrument"
"github.com/m3db/m3/src/x/pool"
- xresource "github.com/m3db/m3/src/x/resource"
xretry "github.com/m3db/m3/src/x/retry"
"github.com/m3db/m3/src/x/sampler"
"github.com/m3db/m3/src/x/serialize"
xsync "github.com/m3db/m3/src/x/sync"
- tchannel "github.com/uber/tchannel-go"
+ "github.com/uber/tchannel-go"
"github.com/uber/tchannel-go/thrift"
)
@@ -319,7 +318,7 @@ func NewOptionsForAsyncClusters(opts Options, topoInits []topology.Initializer,
func defaultNewConnectionFn(
channelName string, address string, clientOpts Options,
-) (xresource.SimpleCloser, rpc.TChanNode, error) {
+) (PooledChannel, rpc.TChanNode, error) {
// NB(r): Keep ref to a local channel options since it's actually modified
// by TChannel itself to set defaults.
var opts *tchannel.ChannelOptions
diff --git a/src/dbnode/client/peer.go b/src/dbnode/client/peer.go
index b5bdc075b4..0128e58453 100644
--- a/src/dbnode/client/peer.go
+++ b/src/dbnode/client/peer.go
@@ -38,6 +38,6 @@ func (p *sessionPeer) Host() topology.Host {
return p.host
}
-func (p *sessionPeer) BorrowConnection(fn withConnectionFn) error {
+func (p *sessionPeer) BorrowConnection(fn WithConnectionFn) error {
return p.source.BorrowConnection(p.host.ID(), fn)
}
diff --git a/src/dbnode/client/replicated_session.go b/src/dbnode/client/replicated_session.go
index 8064a5df8e..5df3dfb506 100644
--- a/src/dbnode/client/replicated_session.go
+++ b/src/dbnode/client/replicated_session.go
@@ -24,6 +24,9 @@ import (
"fmt"
"time"
+ "github.com/uber-go/tally"
+ "go.uber.org/zap"
+
"github.com/m3db/m3/src/dbnode/encoding"
"github.com/m3db/m3/src/dbnode/namespace"
"github.com/m3db/m3/src/dbnode/storage/block"
@@ -33,8 +36,6 @@ import (
"github.com/m3db/m3/src/x/ident"
m3sync "github.com/m3db/m3/src/x/sync"
xtime "github.com/m3db/m3/src/x/time"
- "github.com/uber-go/tally"
- "go.uber.org/zap"
)
type newSessionFn func(Options) (clientSession, error)
@@ -111,8 +112,6 @@ func newReplicatedSession(opts Options, asyncOpts []Options, options ...replicat
return &session, nil
}
-type writeFunc func(Session) error
-
func (s *replicatedSession) setSession(opts Options) error {
if opts.TopologyInitializer() == nil {
return nil
@@ -343,6 +342,14 @@ func (s replicatedSession) FetchBlocksFromPeers(
return s.session.FetchBlocksFromPeers(namespace, shard, consistencyLevel, metadatas, opts)
}
+func (s *replicatedSession) BorrowConnections(
+ shardID uint32,
+ fn WithBorrowConnectionFn,
+ opts BorrowConnectionOptions,
+) (BorrowConnectionsResult, error) {
+ return s.session.BorrowConnections(shardID, fn, opts)
+}
+
// Open the client session.
func (s replicatedSession) Open() error {
if err := s.session.Open(); err != nil {
diff --git a/src/dbnode/client/session.go b/src/dbnode/client/session.go
index be79aa8331..e1de530ebe 100644
--- a/src/dbnode/client/session.go
+++ b/src/dbnode/client/session.go
@@ -629,7 +629,73 @@ func (s *session) Open() error {
return nil
}
-func (s *session) BorrowConnection(hostID string, fn withConnectionFn) error {
+func (s *session) BorrowConnections(
+ shardID uint32,
+ fn WithBorrowConnectionFn,
+ opts BorrowConnectionOptions,
+) (BorrowConnectionsResult, error) {
+ var result BorrowConnectionsResult
+ s.state.RLock()
+ topoMap, err := s.topologyMapWithStateRLock()
+ s.state.RUnlock()
+ if err != nil {
+ return result, err
+ }
+
+ var (
+ multiErr = xerrors.NewMultiError()
+ breakLoop bool
+ )
+ err = topoMap.RouteShardForEach(shardID, func(
+ _ int,
+ shard shard.Shard,
+ host topology.Host,
+ ) {
+ if multiErr.NumErrors() > 0 || breakLoop {
+ // Bail early: an error already occurred or a callback broke the loop.
+ return
+ }
+
+ var (
+ userResult WithBorrowConnectionResult
+ userErr error
+ )
+ borrowErr := s.BorrowConnection(host.ID(), func(
+ client rpc.TChanNode,
+ channel PooledChannel,
+ ) {
+ userResult, userErr = fn(shard, host, client, channel)
+ })
+ if borrowErr != nil {
+ // Couldn't borrow a connection at all; skip this host if we don't
+ // want to error on down hosts, otherwise record the borrow error.
+ if !opts.ContinueOnBorrowError {
+ multiErr = multiErr.Add(borrowErr)
+ }
+ return
+ }
+
+ // Track successful borrow.
+ result.Borrowed++
+
+ // Track whether a callback has broken the loop.
+ breakLoop = userResult.Break
+
+ // Record any user error so it is surfaced to the caller.
+ if userErr != nil {
+ multiErr = multiErr.Add(userErr)
+ }
+ })
+ if err != nil {
+ // Route error.
+ return result, err
+ }
+ // Potentially a user error or borrow error, otherwise
+ // FinalError() will return nil.
+ return result, multiErr.FinalError()
+}
+
+func (s *session) BorrowConnection(hostID string, fn WithConnectionFn) error {
s.state.RLock()
unlocked := false
queue, ok := s.state.queuesByHostID[hostID]
@@ -637,13 +703,13 @@ func (s *session) BorrowConnection(hostID string, fn withConnectionFn) error {
s.state.RUnlock()
return errSessionHasNoHostQueueForHost
}
- err := queue.BorrowConnection(func(c rpc.TChanNode) {
+ err := queue.BorrowConnection(func(client rpc.TChanNode, ch PooledChannel) {
// Unlock early on success
s.state.RUnlock()
unlocked = true
// Execute function with borrowed connection
- fn(c)
+ fn(client, ch)
})
if !unlocked {
s.state.RUnlock()
@@ -2557,7 +2623,7 @@ func (s *session) streamBlocksMetadataFromPeer(
}
var attemptErr error
- checkedAttemptFn := func(client rpc.TChanNode) {
+ checkedAttemptFn := func(client rpc.TChanNode, _ PooledChannel) {
attemptErr = attemptFn(client)
}
@@ -3074,7 +3140,7 @@ func (s *session) streamBlocksBatchFromPeer(
// Attempt request
if err := retrier.Attempt(func() error {
var attemptErr error
- borrowErr := peer.BorrowConnection(func(client rpc.TChanNode) {
+ borrowErr := peer.BorrowConnection(func(client rpc.TChanNode, _ PooledChannel) {
tctx, _ := thrift.NewContext(s.streamBlocksBatchTimeout)
result, attemptErr = client.FetchBlocksRaw(tctx, req)
})
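
A hedged usage sketch of the new API from outside the package (adminSession, the shard ID, and the Health probe are illustrative): borrow a connection per host owning a shard, skip unreachable hosts, and stop after the first success.

result, err := adminSession.BorrowConnections(42,
	func(
		sh shard.Shard,
		host topology.Host,
		node rpc.TChanNode,
		ch client.PooledChannel,
	) (client.WithBorrowConnectionResult, error) {
		tctx, _ := thrift.NewContext(time.Second)
		if _, err := node.Health(tctx); err != nil {
			// Returned errors are collected into a multi-error.
			return client.WithBorrowConnectionResult{}, err
		}
		// Break stops iterating over the shard's remaining hosts.
		return client.WithBorrowConnectionResult{Break: true}, nil
	},
	client.BorrowConnectionOptions{ContinueOnBorrowError: true})
if err == nil {
	fmt.Printf("borrowed %d connection(s)\n", result.Borrowed)
}
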
diff --git a/src/dbnode/client/session_fetch_bulk_blocks_test.go b/src/dbnode/client/session_fetch_bulk_blocks_test.go
index db4d1afecd..0183fb4703 100644
--- a/src/dbnode/client/session_fetch_bulk_blocks_test.go
+++ b/src/dbnode/client/session_fetch_bulk_blocks_test.go
@@ -339,7 +339,7 @@ func TestFetchBootstrapBlocksDontRetryHostNotAvailableInRetrier(t *testing.T) {
connectionPool := NewMockconnectionPool(ctrl)
connectionPool.EXPECT().
NextClient().
- Return(nil, errConnectionPoolHasNoConnections).
+ Return(nil, nil, errConnectionPoolHasNoConnections).
AnyTimes()
hostQueue := NewMockhostQueue(ctrl)
hostQueue.EXPECT().Open()
@@ -2008,15 +2008,15 @@ func defaultHostAndClientWithExpect(
) (*MockhostQueue, *rpc.MockTChanNode) {
client := rpc.NewMockTChanNode(ctrl)
connectionPool := NewMockconnectionPool(ctrl)
- connectionPool.EXPECT().NextClient().Return(client, nil).AnyTimes()
+ connectionPool.EXPECT().NextClient().Return(client, &noopPooledChannel{}, nil).AnyTimes()
hostQueue := NewMockhostQueue(ctrl)
hostQueue.EXPECT().Open()
hostQueue.EXPECT().Host().Return(host).AnyTimes()
hostQueue.EXPECT().ConnectionCount().Return(opts.MinConnectionCount()).Times(sessionTestShards)
hostQueue.EXPECT().ConnectionPool().Return(connectionPool).AnyTimes()
- hostQueue.EXPECT().BorrowConnection(gomock.Any()).Do(func(fn withConnectionFn) {
- fn(client)
+ hostQueue.EXPECT().BorrowConnection(gomock.Any()).Do(func(fn WithConnectionFn) {
+ fn(client, &noopPooledChannel{})
}).Return(nil).AnyTimes()
hostQueue.EXPECT().Close()
diff --git a/src/dbnode/client/session_fetch_high_concurrency_test.go b/src/dbnode/client/session_fetch_high_concurrency_test.go
index a4acd087c3..35a50e7677 100644
--- a/src/dbnode/client/session_fetch_high_concurrency_test.go
+++ b/src/dbnode/client/session_fetch_high_concurrency_test.go
@@ -28,6 +28,10 @@ import (
"testing"
"time"
+ "github.com/golang/mock/gomock"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
"github.com/m3db/m3/src/cluster/shard"
"github.com/m3db/m3/src/dbnode/encoding/m3tsz"
"github.com/m3db/m3/src/dbnode/generated/thrift/rpc"
@@ -35,18 +39,9 @@ import (
"github.com/m3db/m3/src/dbnode/topology"
"github.com/m3db/m3/src/dbnode/ts"
"github.com/m3db/m3/src/x/ident"
- xresource "github.com/m3db/m3/src/x/resource"
xtime "github.com/m3db/m3/src/x/time"
-
- "github.com/golang/mock/gomock"
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
)
-type noopCloser struct{}
-
-func (noopCloser) Close() {}
-
func TestSessionFetchIDsHighConcurrency(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
@@ -102,7 +97,7 @@ func TestSessionFetchIDsHighConcurrency(t *testing.T) {
// to be able to mock the entire end to end pipeline
newConnFn := func(
_ string, addr string, _ Options,
- ) (xresource.SimpleCloser, rpc.TChanNode, error) {
+ ) (PooledChannel, rpc.TChanNode, error) {
mockClient := rpc.NewMockTChanNode(ctrl)
mockClient.EXPECT().Health(gomock.Any()).
Return(healthCheckResult, nil).
@@ -110,7 +105,7 @@ func TestSessionFetchIDsHighConcurrency(t *testing.T) {
mockClient.EXPECT().FetchBatchRaw(gomock.Any(), gomock.Any()).
Return(respResult, nil).
AnyTimes()
- return noopCloser{}, mockClient, nil
+ return &noopPooledChannel{}, mockClient, nil
}
shards := make([]shard.Shard, numShards)
for i := range shards {
diff --git a/src/dbnode/client/types.go b/src/dbnode/client/types.go
index 53399bf333..0de707fe23 100644
--- a/src/dbnode/client/types.go
+++ b/src/dbnode/client/types.go
@@ -23,6 +23,7 @@ package client
import (
"time"
+ "github.com/m3db/m3/src/cluster/shard"
"github.com/m3db/m3/src/dbnode/encoding"
"github.com/m3db/m3/src/dbnode/generated/thrift/rpc"
"github.com/m3db/m3/src/dbnode/namespace"
@@ -42,7 +43,7 @@ import (
xsync "github.com/m3db/m3/src/x/sync"
xtime "github.com/m3db/m3/src/x/time"
- tchannel "github.com/uber/tchannel-go"
+ "github.com/uber/tchannel-go"
)
// Client can create sessions to write and read to a cluster.
@@ -245,6 +246,39 @@ type AdminSession interface {
metadatas []block.ReplicaMetadata,
opts result.Options,
) (PeerBlocksIter, error)
+
+ // BorrowConnections will borrow a connection for each host belonging to a shard.
+ BorrowConnections(
+ shardID uint32,
+ fn WithBorrowConnectionFn,
+ opts BorrowConnectionOptions,
+ ) (BorrowConnectionsResult, error)
+}
+
+// BorrowConnectionOptions are options to use when borrowing a connection.
+type BorrowConnectionOptions struct {
+ // ContinueOnBorrowError allows skipping hosts for which a connection
+ // cannot be borrowed.
+ ContinueOnBorrowError bool
+}
+
+// BorrowConnectionsResult is a result used when borrowing connections.
+type BorrowConnectionsResult struct {
+ Borrowed int
+}
+
+// WithBorrowConnectionFn is used to do work with a borrowed connection.
+type WithBorrowConnectionFn func(
+ shard shard.Shard,
+ host topology.Host,
+ client rpc.TChanNode,
+ channel PooledChannel,
+) (WithBorrowConnectionResult, error)
+
+// WithBorrowConnectionResult is returned from a borrow connection function.
+type WithBorrowConnectionResult struct {
+ // Break will break the iteration.
+ Break bool
}
// Options is a set of client options.
@@ -694,13 +728,14 @@ type hostQueue interface {
ConnectionPool() connectionPool
// BorrowConnection will borrow a connection and execute a user function.
- BorrowConnection(fn withConnectionFn) error
+ BorrowConnection(fn WithConnectionFn) error
// Close the host queue, will flush any operations still pending.
Close()
}
-type withConnectionFn func(c rpc.TChanNode)
+// WithConnectionFn is a callback for a connection to a host.
+type WithConnectionFn func(client rpc.TChanNode, ch PooledChannel)
type connectionPool interface {
// Open starts the connection pool connecting and health checking.
@@ -710,7 +745,7 @@ type connectionPool interface {
ConnectionCount() int
// NextClient gets the next client for use by the connection pool.
- NextClient() (rpc.TChanNode, error)
+ NextClient() (rpc.TChanNode, PooledChannel, error)
// Close the connection pool.
Close()
@@ -718,7 +753,7 @@ type connectionPool interface {
type peerSource interface {
// BorrowConnection will borrow a connection and execute a user function.
- BorrowConnection(hostID string, fn withConnectionFn) error
+ BorrowConnection(hostID string, fn WithConnectionFn) error
}
type peer interface {
@@ -726,7 +761,7 @@ type peer interface {
Host() topology.Host
// BorrowConnection will borrow a connection and execute a user function.
- BorrowConnection(fn withConnectionFn) error
+ BorrowConnection(fn WithConnectionFn) error
}
type status int
diff --git a/src/dbnode/generated-source-files.mk b/src/dbnode/generated-source-files.mk
index 400a7c68c4..748dfeaea9 100644
--- a/src/dbnode/generated-source-files.mk
+++ b/src/dbnode/generated-source-files.mk
@@ -302,7 +302,7 @@ genny-list-all: \
genny-list-storage-id:
cd $(m3x_package_path) && make genny-pooled-elem-list-gen \
pkg=storage \
- value_type=doc.Document \
+ value_type=doc.Metadata \
rename_type_prefix=id \
rename_type_middle=ID \
target_package=github.com/m3db/m3/src/dbnode/storage
diff --git a/src/dbnode/integration/fs_bootstrap_index_volume_type_test.go b/src/dbnode/integration/fs_bootstrap_index_volume_type_test.go
index 58ac71aca6..6f67566bcb 100644
--- a/src/dbnode/integration/fs_bootstrap_index_volume_type_test.go
+++ b/src/dbnode/integration/fs_bootstrap_index_volume_type_test.go
@@ -73,7 +73,7 @@ func TestFilesystemBootstrapIndexVolumeTypes(t *testing.T) {
ID: ident.StringID("foo"),
Tags: ident.NewTags(ident.StringTag("city", "new_york"), ident.StringTag("foo", "foo")),
}
- fooDoc := doc.Document{
+ fooDoc := doc.Metadata{
ID: fooSeries.ID.Bytes(),
Fields: []doc.Field{
doc.Field{Name: []byte("city"), Value: []byte("new_york")},
@@ -85,7 +85,7 @@ func TestFilesystemBootstrapIndexVolumeTypes(t *testing.T) {
ID: ident.StringID("bar"),
Tags: ident.NewTags(ident.StringTag("city", "new_jersey")),
}
- barDoc := doc.Document{
+ barDoc := doc.Metadata{
ID: barSeries.ID.Bytes(),
Fields: []doc.Field{
doc.Field{Name: []byte("city"), Value: []byte("new_jersey")},
@@ -96,7 +96,7 @@ func TestFilesystemBootstrapIndexVolumeTypes(t *testing.T) {
ID: ident.StringID("baz"),
Tags: ident.NewTags(ident.StringTag("city", "seattle")),
}
- bazDoc := doc.Document{
+ bazDoc := doc.Metadata{
ID: bazSeries.ID.Bytes(),
Fields: []doc.Field{
doc.Field{Name: []byte("city"), Value: []byte("seattle")},
@@ -107,7 +107,7 @@ func TestFilesystemBootstrapIndexVolumeTypes(t *testing.T) {
ID: ident.StringID("qux"),
Tags: ident.NewTags(ident.StringTag("city", "new_harmony")),
}
- quxDoc := doc.Document{
+ quxDoc := doc.Metadata{
ID: quxSeries.ID.Bytes(),
Fields: []doc.Field{
doc.Field{Name: []byte("city"), Value: []byte("new_harmony")},
@@ -118,7 +118,7 @@ func TestFilesystemBootstrapIndexVolumeTypes(t *testing.T) {
ID: ident.StringID("dux"),
Tags: ident.NewTags(ident.StringTag("city", "los_angeles")),
}
- duxDoc := doc.Document{
+ duxDoc := doc.Metadata{
ID: duxSeries.ID.Bytes(),
Fields: []doc.Field{
doc.Field{Name: []byte("city"), Value: []byte("los_angeles")},
@@ -163,12 +163,12 @@ func TestFilesystemBootstrapIndexVolumeTypes(t *testing.T) {
Start: now,
},
})
- defaultIndexDocs := []doc.Document{
+ defaultIndexDocs := []doc.Metadata{
fooDoc,
barDoc,
bazDoc,
}
- extraIndexDocs := []doc.Document{
+ extraIndexDocs := []doc.Metadata{
quxDoc,
duxDoc,
}
diff --git a/src/dbnode/integration/integration.go b/src/dbnode/integration/integration.go
index 36cdf5fffb..3c61c170d3 100644
--- a/src/dbnode/integration/integration.go
+++ b/src/dbnode/integration/integration.go
@@ -450,7 +450,7 @@ func writeTestIndexDataToDisk(
indexVolumeType idxpersist.IndexVolumeType,
blockStart time.Time,
shards []uint32,
- docs []doc.Document,
+ docs []doc.Metadata,
) error {
blockSize := md.Options().IndexOptions().BlockSize()
fsOpts := storageOpts.CommitLogOptions().FilesystemOptions()
diff --git a/src/dbnode/integration/query_limit_test.go b/src/dbnode/integration/query_limit_test.go
new file mode 100644
index 0000000000..53a00b7665
--- /dev/null
+++ b/src/dbnode/integration/query_limit_test.go
@@ -0,0 +1,109 @@
+// +build integration
+
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package integration
+
+import (
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/m3db/m3/src/dbnode/client"
+ "github.com/m3db/m3/src/dbnode/namespace"
+ "github.com/m3db/m3/src/dbnode/storage"
+ "github.com/m3db/m3/src/dbnode/storage/index"
+ "github.com/m3db/m3/src/dbnode/storage/limits"
+ "github.com/m3db/m3/src/m3ninx/idx"
+ "github.com/m3db/m3/src/x/ident"
+ xtime "github.com/m3db/m3/src/x/time"
+)
+
+func TestQueryLimitExceededError(t *testing.T) {
+ testOpts, ns := newTestOptionsWithIndexedNamespace(t)
+ testSetup := newTestSetupWithQueryLimits(t, testOpts)
+ defer testSetup.Close()
+
+ require.NoError(t, testSetup.StartServer())
+ defer func() {
+ require.NoError(t, testSetup.StopServer())
+ }()
+
+ var (
+ nowFn = testSetup.StorageOpts().ClockOptions().NowFn()
+ end = nowFn().Truncate(time.Hour)
+ start = end.Add(-time.Hour)
+ query = index.Query{Query: idx.NewTermQuery([]byte("tag"), []byte("value"))}
+ queryOpts = index.QueryOptions{StartInclusive: start, EndExclusive: end}
+ )
+
+ session, err := testSetup.M3DBClient().DefaultSession()
+ require.NoError(t, err)
+
+ for i := 0; i < 2; i++ {
+ var (
+ metricName = fmt.Sprintf("metric_%v", i)
+ tags = ident.StringTag("tag", "value")
+ timestamp = nowFn().Add(-time.Minute * time.Duration(i+1))
+ )
+ session.WriteTagged(ns.ID(), ident.StringID(metricName),
+ ident.NewTagsIterator(ident.NewTags(tags)), timestamp, 0.0, xtime.Second, nil)
+ }
+
+ _, _, err = session.FetchTagged(ns.ID(), query, queryOpts)
+ require.True(t, client.IsResourceExhaustedError(err),
+ "expected resource exhausted error, got: %v", err)
+}
+
+func newTestOptionsWithIndexedNamespace(t *testing.T) (TestOptions, namespace.Metadata) {
+ idxOpts := namespace.NewIndexOptions().SetEnabled(true)
+ nsOpts := namespace.NewOptions().SetIndexOptions(idxOpts)
+ ns, err := namespace.NewMetadata(testNamespaces[0], nsOpts)
+ require.NoError(t, err)
+
+ testOpts := NewTestOptions(t).SetNamespaces([]namespace.Metadata{ns})
+ return testOpts, ns
+}
+
+func newTestSetupWithQueryLimits(t *testing.T, opts TestOptions) TestSetup {
+ storageLimitsFn := func(storageOpts storage.Options) storage.Options {
+ queryLookback := limits.DefaultLookbackLimitOptions()
+ queryLookback.Limit = 1
+ queryLookback.Lookback = time.Hour
+
+ limitOpts := limits.NewOptions().
+ SetBytesReadLimitOpts(queryLookback).
+ SetDocsLimitOpts(queryLookback).
+ SetInstrumentOptions(storageOpts.InstrumentOptions())
+ queryLimits, err := limits.NewQueryLimits(limitOpts)
+ require.NoError(t, err)
+
+ indexOpts := storageOpts.IndexOptions().SetQueryLimits(queryLimits)
+ return storageOpts.SetIndexOptions(indexOpts)
+ }
+
+ setup, err := NewTestSetup(t, opts, nil, storageLimitsFn)
+ require.NoError(t, err)
+
+ return setup
+}
diff --git a/src/dbnode/persist/fs/merger.go b/src/dbnode/persist/fs/merger.go
index a6e78dc496..ca71228158 100644
--- a/src/dbnode/persist/fs/merger.go
+++ b/src/dbnode/persist/fs/merger.go
@@ -240,7 +240,7 @@ func (m *merger) Merge(
ctx.Reset()
err = mergeWith.ForEachRemaining(
ctx, blockStart,
- func(seriesMetadata doc.Document, mergeWithData block.FetchBlockResult) error {
+ func(seriesMetadata doc.Metadata, mergeWithData block.FetchBlockResult) error {
segmentReaders = segmentReaders[:0]
segmentReaders = appendBlockReadersToSegmentReaders(segmentReaders, mergeWithData.Blocks)
diff --git a/src/dbnode/persist/fs/merger_test.go b/src/dbnode/persist/fs/merger_test.go
index df9531c479..57c2b4c9a1 100644
--- a/src/dbnode/persist/fs/merger_test.go
+++ b/src/dbnode/persist/fs/merger_test.go
@@ -703,7 +703,8 @@ func mockMergeWithFromData(
Start: startTime,
Blocks: []xio.BlockReader{blockReaderFromData(data, segReader, startTime, blockSize)},
}
- fn(doc.Document{ID: id.Bytes()}, br)
+ err := fn(doc.Metadata{ID: id.Bytes()}, br)
+ require.NoError(t, err)
}
}
})
diff --git a/src/dbnode/persist/fs/persist_manager.go b/src/dbnode/persist/fs/persist_manager.go
index 1051301a69..9b824e7c79 100644
--- a/src/dbnode/persist/fs/persist_manager.go
+++ b/src/dbnode/persist/fs/persist_manager.go
@@ -115,22 +115,106 @@ type dataPersistManager struct {
snapshotID uuid.UUID
}
-type indexPersistManager struct {
- writer IndexFileSetWriter
- segmentWriter m3ninxpersist.MutableSegmentFileSetWriter
-
+type singleUseIndexWriterState struct {
// identifiers required to know which file to open
// after persistence is over
fileSetIdentifier FileSetFileIdentifier
fileSetType persist.FileSetType
- // track state of writers
- writeErr error
- initialized bool
+ // track state of writer
+ writeErr error
+}
+
+// Support writing to multiple index blocks/filesets during index persist.
+// This allows us to prepare an index fileset writer per block start.
+type singleUseIndexWriter struct {
+ // back-ref to the index persist manager so we can share resources there
+ manager *indexPersistManager
+ writer IndexFileSetWriter
+
+ state singleUseIndexWriterState
+}
+
+func (s *singleUseIndexWriter) persistIndex(builder segment.Builder) error {
+ // Lock the index persist manager as we're sharing the segment builder as a resource.
+ s.manager.Lock()
+ defer s.manager.Unlock()
+
+ markError := func(err error) {
+ s.state.writeErr = err
+ }
+ if err := s.state.writeErr; err != nil {
+ return fmt.Errorf("encountered error: %w, skipping further attempts to persist data", err)
+ }
+
+ if err := s.manager.segmentWriter.Reset(builder); err != nil {
+ markError(err)
+ return err
+ }
+
+ if err := s.writer.WriteSegmentFileSet(s.manager.segmentWriter); err != nil {
+ markError(err)
+ return err
+ }
+
+ return nil
+}
+
+func (s *singleUseIndexWriter) closeIndex() ([]segment.Segment, error) {
+ s.manager.Lock()
+ defer s.manager.Unlock()
+
+ // This writer will be thrown away after we're done persisting.
+ defer func() {
+ s.state = singleUseIndexWriterState{fileSetType: -1}
+ s.manager = nil
+ s.writer = nil
+ }()
+
+ // i.e. we're done writing all segments for PreparedIndexPersist.
+ // so we can close the writer.
+ if err := s.writer.Close(); err != nil {
+ return nil, err
+ }
+
+ // only attempt to retrieve data if we have not encountered errors during
+ // any writes.
+ if err := s.state.writeErr; err != nil {
+ return nil, err
+ }
+
+ // and then we get persistent segments backed by mmap'd data so the index
+ // can safely evict the segments we have just persisted.
+ result, err := ReadIndexSegments(ReadIndexSegmentsOptions{
+ ReaderOptions: IndexReaderOpenOptions{
+ Identifier: s.state.fileSetIdentifier,
+ FileSetType: s.state.fileSetType,
+ },
+ FilesystemOptions: s.manager.opts,
+ newReaderFn: s.manager.newReaderFn,
+ newPersistentSegmentFn: s.manager.newPersistentSegmentFn,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return result.Segments, nil
+}
+
+type indexPersistManager struct {
+ sync.Mutex
+
+ // segmentWriter holds the bulk of the re-usable in-mem resources so
+ // we want to share this across writers.
+ segmentWriter m3ninxpersist.MutableSegmentFileSetWriter
// hooks used for testing
newReaderFn newIndexReaderFn
newPersistentSegmentFn newPersistentSegmentFn
+ newIndexWriterFn newIndexWriterFn
+
+ // options used by index writers
+ opts Options
}
type newIndexReaderFn func(Options) (IndexFileSetReader, error)
@@ -140,6 +224,8 @@ type newPersistentSegmentFn func(
m3ninxfs.Options,
) (m3ninxfs.Segment, error)
+type newIndexWriterFn func(Options) (IndexFileSetWriter, error)
+
type persistManagerMetrics struct {
writeDurationMs tally.Gauge
throttleDurationMs tally.Gauge
@@ -163,11 +249,6 @@ func NewPersistManager(opts Options) (persist.Manager, error) {
return nil, err
}
- idxWriter, err := NewIndexWriter(opts)
- if err != nil {
- return nil, err
- }
-
segmentWriter, err := m3ninxpersist.NewMutableSegmentFileSetWriter(
opts.FSTWriterOptions())
if err != nil {
@@ -186,30 +267,31 @@ func NewPersistManager(opts Options) (persist.Manager, error) {
snapshotMetadataWriter: NewSnapshotMetadataWriter(opts),
},
indexPM: indexPersistManager{
- writer: idxWriter,
segmentWriter: segmentWriter,
+ // fs opts are used by underlying index writers
+ opts: opts,
},
status: persistManagerIdle,
metrics: newPersistManagerMetrics(scope),
}
pm.indexPM.newReaderFn = NewIndexReader
pm.indexPM.newPersistentSegmentFn = m3ninxpersist.NewSegment
+ pm.indexPM.newIndexWriterFn = NewIndexWriter
pm.runtimeOptsListener = opts.RuntimeOptionsManager().RegisterListener(pm)
return pm, nil
}
-func (pm *persistManager) reset() {
+func (pm *persistManager) resetWithLock() error {
pm.status = persistManagerIdle
pm.start = timeZero
pm.count = 0
pm.bytesWritten = 0
pm.worked = 0
pm.slept = 0
- pm.indexPM.segmentWriter.Reset(nil)
- pm.indexPM.writeErr = nil
- pm.indexPM.initialized = false
pm.dataPM.snapshotID = nil
+
+ return pm.indexPM.segmentWriter.Reset(nil)
}
// StartIndexPersist is called by the databaseFlushManager to begin the persist process for
@@ -271,83 +353,32 @@ func (pm *persistManager) PrepareIndex(opts persist.IndexPrepareOptions) (persis
IndexVolumeType: opts.IndexVolumeType,
}
+ writer, err := pm.indexPM.newIndexWriterFn(pm.opts)
+ if err != nil {
+ return prepared, err
+ }
+ idxWriter := &singleUseIndexWriter{
+ manager: &pm.indexPM,
+ writer: writer,
+ state: singleUseIndexWriterState{
+ // track which file we are writing in the persist manager, so we
+ // know which file to read back on `closeIndex` being called.
+ fileSetIdentifier: fileSetID,
+ fileSetType: opts.FileSetType,
+ },
+ }
// create writer for required fileset file.
- if err := pm.indexPM.writer.Open(idxWriterOpts); err != nil {
+ if err := idxWriter.writer.Open(idxWriterOpts); err != nil {
return prepared, err
}
- // track which file we are writing in the persist manager, so we
- // know which file to read back on `closeIndex` being called.
- pm.indexPM.fileSetIdentifier = fileSetID
- pm.indexPM.fileSetType = opts.FileSetType
- pm.indexPM.initialized = true
-
// provide persistManager hooks into PreparedIndexPersist object
- prepared.Persist = pm.persistIndex
- prepared.Close = pm.closeIndex
+ prepared.Persist = idxWriter.persistIndex
+ prepared.Close = idxWriter.closeIndex
return prepared, nil
}
-func (pm *persistManager) persistIndex(builder segment.Builder) error {
- // FOLLOWUP(prateek): need to use-rate limiting runtime options in this code path
- markError := func(err error) {
- pm.indexPM.writeErr = err
- }
- if err := pm.indexPM.writeErr; err != nil {
- return fmt.Errorf("encountered error: %v, skipping further attempts to persist data", err)
- }
-
- if err := pm.indexPM.segmentWriter.Reset(builder); err != nil {
- markError(err)
- return err
- }
-
- if err := pm.indexPM.writer.WriteSegmentFileSet(pm.indexPM.segmentWriter); err != nil {
- markError(err)
- return err
- }
-
- return nil
-}
-
-func (pm *persistManager) closeIndex() ([]segment.Segment, error) {
- // ensure StartIndexPersist was called
- if !pm.indexPM.initialized {
- return nil, errPersistManagerNotPersisting
- }
- pm.indexPM.initialized = false
-
- // i.e. we're done writing all segments for PreparedIndexPersist.
- // so we can close the writer.
- if err := pm.indexPM.writer.Close(); err != nil {
- return nil, err
- }
-
- // only attempt to retrieve data if we have not encountered errors during
- // any writes.
- if err := pm.indexPM.writeErr; err != nil {
- return nil, err
- }
-
- // and then we get persistent segments backed by mmap'd data so the index
- // can safely evict the segment's we have just persisted.
- result, err := ReadIndexSegments(ReadIndexSegmentsOptions{
- ReaderOptions: IndexReaderOpenOptions{
- Identifier: pm.indexPM.fileSetIdentifier,
- FileSetType: pm.indexPM.fileSetType,
- },
- FilesystemOptions: pm.opts,
- newReaderFn: pm.indexPM.newReaderFn,
- newPersistentSegmentFn: pm.indexPM.newPersistentSegmentFn,
- })
- if err != nil {
- return nil, err
- }
-
- return result.Segments, nil
-}
-
// DoneIndex is called by the databaseFlushManager to finish the index persist process.
func (pm *persistManager) DoneIndex() error {
pm.Lock()
@@ -362,9 +393,7 @@ func (pm *persistManager) DoneIndex() error {
pm.metrics.throttleDurationMs.Update(float64(pm.slept / time.Millisecond))
// Reset state
- pm.reset()
-
- return nil
+ return pm.resetWithLock()
}
// StartFlushPersist is called by the databaseFlushManager to begin the persist process.
@@ -558,7 +587,7 @@ func (pm *persistManager) DoneFlush() error {
return errPersistManagerCannotDoneFlushNotFlush
}
- return pm.doneShared()
+ return pm.doneSharedWithLock()
}
// DoneSnapshot is called by the databaseFlushManager to finish the snapshot persist process.
@@ -594,7 +623,7 @@ func (pm *persistManager) DoneSnapshot(
return fmt.Errorf("error writing out snapshot metadata file: %v", err)
}
- return pm.doneShared()
+ return pm.doneSharedWithLock()
}
// Close all resources.
@@ -602,15 +631,13 @@ func (pm *persistManager) Close() {
pm.runtimeOptsListener.Close()
}
-func (pm *persistManager) doneShared() error {
+func (pm *persistManager) doneSharedWithLock() error {
// Emit timing metrics
pm.metrics.writeDurationMs.Update(float64(pm.worked / time.Millisecond))
pm.metrics.throttleDurationMs.Update(float64(pm.slept / time.Millisecond))
// Reset state
- pm.reset()
-
- return nil
+ return pm.resetWithLock()
}
func (pm *persistManager) dataFilesetExists(prepareOpts persist.DataPrepareOptions) (bool, error) {
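
The net effect of the refactor: per-fileset state moves off indexPersistManager onto a throwaway singleUseIndexWriter, so one StartIndexPersist can prepare several block starts. A hedged sketch of the resulting call pattern (blockStarts, md, and builder are illustrative):

flush, err := pm.StartIndexPersist()
if err != nil {
	return err
}
for _, blockStart := range blockStarts {
	// Each PrepareIndex now builds an independent single-use writer.
	prepared, err := flush.PrepareIndex(persist.IndexPrepareOptions{
		NamespaceMetadata: md,
		BlockStart:        blockStart,
	})
	if err != nil {
		return err
	}
	// Persist serializes the builder via the shared segment writer.
	if err := prepared.Persist(builder); err != nil {
		return err
	}
	// Close reads back mmap'd segments and discards the writer.
	if _, err := prepared.Close(); err != nil {
		return err
	}
}
return flush.DoneIndex()
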
diff --git a/src/dbnode/persist/fs/persist_manager_test.go b/src/dbnode/persist/fs/persist_manager_test.go
index cd0f39386d..362db4b5eb 100644
--- a/src/dbnode/persist/fs/persist_manager_test.go
+++ b/src/dbnode/persist/fs/persist_manager_test.go
@@ -313,15 +313,6 @@ func TestPersistenceManagerCloseData(t *testing.T) {
pm.closeData()
}
-func TestPersistenceManagerCloseIndex(t *testing.T) {
- ctrl := gomock.NewController(xtest.Reporter{T: t})
- defer ctrl.Finish()
-
- pm, _, _, _ := testIndexPersistManager(t, ctrl)
- defer os.RemoveAll(pm.filePathPrefix)
- pm.closeIndex()
-}
-
func TestPersistenceManagerPrepareIndexFileExists(t *testing.T) {
ctrl := gomock.NewController(xtest.Reporter{T: t})
defer ctrl.Finish()
@@ -421,17 +412,6 @@ func TestPersistenceManagerPrepareIndexSuccess(t *testing.T) {
pm, writer, segWriter, _ := testIndexPersistManager(t, ctrl)
defer os.RemoveAll(pm.filePathPrefix)
- blockStart := time.Unix(1000, 0)
- writerOpts := IndexWriterOpenOptions{
- Identifier: FileSetFileIdentifier{
- FileSetContentType: persist.FileSetIndexContentType,
- Namespace: testNs1ID,
- BlockStart: blockStart,
- },
- BlockSize: testBlockSize,
- }
- writer.EXPECT().Open(xtest.CmpMatcher(writerOpts, m3test.IdentTransformer)).Return(nil)
-
flush, err := pm.StartIndexPersist()
require.NoError(t, err)
@@ -440,46 +420,62 @@ func TestPersistenceManagerPrepareIndexSuccess(t *testing.T) {
assert.NoError(t, flush.DoneIndex())
}()
- prepareOpts := persist.IndexPrepareOptions{
- NamespaceMetadata: testNs1Metadata(t),
- BlockStart: blockStart,
- }
- prepared, err := flush.PrepareIndex(prepareOpts)
- require.NoError(t, err)
+ // We support preparing multiple index block writers for an index persist.
+ numBlocks := 10
+ blockStart := time.Unix(1000, 0)
+ for i := 1; i < numBlocks; i++ {
+ blockStart = blockStart.Add(time.Duration(i) * testBlockSize)
+ writerOpts := IndexWriterOpenOptions{
+ Identifier: FileSetFileIdentifier{
+ FileSetContentType: persist.FileSetIndexContentType,
+ Namespace: testNs1ID,
+ BlockStart: blockStart,
+ },
+ BlockSize: testBlockSize,
+ }
+ writer.EXPECT().Open(xtest.CmpMatcher(writerOpts, m3test.IdentTransformer)).Return(nil)
- seg := segment.NewMockMutableSegment(ctrl)
- segWriter.EXPECT().Reset(seg).Return(nil)
- writer.EXPECT().WriteSegmentFileSet(segWriter).Return(nil)
- require.NoError(t, prepared.Persist(seg))
+ prepareOpts := persist.IndexPrepareOptions{
+ NamespaceMetadata: testNs1Metadata(t),
+ BlockStart: blockStart,
+ }
+ prepared, err := flush.PrepareIndex(prepareOpts)
+ require.NoError(t, err)
- reader := NewMockIndexFileSetReader(ctrl)
- pm.indexPM.newReaderFn = func(Options) (IndexFileSetReader, error) {
- return reader, nil
- }
+ seg := segment.NewMockMutableSegment(ctrl)
+ segWriter.EXPECT().Reset(seg).Return(nil)
+ writer.EXPECT().WriteSegmentFileSet(segWriter).Return(nil)
+ require.NoError(t, prepared.Persist(seg))
- reader.EXPECT().Open(xtest.CmpMatcher(IndexReaderOpenOptions{
- Identifier: writerOpts.Identifier,
- }, m3test.IdentTransformer)).Return(IndexReaderOpenResult{}, nil)
+ reader := NewMockIndexFileSetReader(ctrl)
+ pm.indexPM.newReaderFn = func(Options) (IndexFileSetReader, error) {
+ return reader, nil
+ }
- file := NewMockIndexSegmentFile(ctrl)
- gomock.InOrder(
- reader.EXPECT().SegmentFileSets().Return(1),
- reader.EXPECT().ReadSegmentFileSet().Return(file, nil),
- reader.EXPECT().ReadSegmentFileSet().Return(nil, io.EOF),
- )
- fsSeg := m3ninxfs.NewMockSegment(ctrl)
- pm.indexPM.newPersistentSegmentFn = func(
- fset m3ninxpersist.IndexSegmentFileSet, opts m3ninxfs.Options,
- ) (m3ninxfs.Segment, error) {
- require.Equal(t, file, fset)
- return fsSeg, nil
- }
+ reader.EXPECT().Open(xtest.CmpMatcher(IndexReaderOpenOptions{
+ Identifier: writerOpts.Identifier,
+ }, m3test.IdentTransformer)).Return(IndexReaderOpenResult{}, nil)
+
+ file := NewMockIndexSegmentFile(ctrl)
+ gomock.InOrder(
+ reader.EXPECT().SegmentFileSets().Return(1),
+ reader.EXPECT().ReadSegmentFileSet().Return(file, nil),
+ reader.EXPECT().ReadSegmentFileSet().Return(nil, io.EOF),
+ )
+ fsSeg := m3ninxfs.NewMockSegment(ctrl)
+ pm.indexPM.newPersistentSegmentFn = func(
+ fset m3ninxpersist.IndexSegmentFileSet, opts m3ninxfs.Options,
+ ) (m3ninxfs.Segment, error) {
+ require.Equal(t, file, fset)
+ return fsSeg, nil
+ }
- writer.EXPECT().Close().Return(nil)
- segs, err := prepared.Close()
- require.NoError(t, err)
- require.Len(t, segs, 1)
- require.Equal(t, fsSeg, segs[0])
+ writer.EXPECT().Close().Return(nil)
+ segs, err := prepared.Close()
+ require.NoError(t, err)
+ require.Len(t, segs, 1)
+ require.Equal(t, fsSeg, segs[0])
+ }
}
func TestPersistenceManagerNoRateLimit(t *testing.T) {
@@ -766,7 +762,9 @@ func testIndexPersistManager(t *testing.T, ctrl *gomock.Controller,
require.NoError(t, err)
manager := mgr.(*persistManager)
- manager.indexPM.writer = writer
+ manager.indexPM.newIndexWriterFn = func(opts Options) (IndexFileSetWriter, error) {
+ return writer, nil
+ }
manager.indexPM.segmentWriter = segmentWriter
return manager, writer, segmentWriter, opts
}
diff --git a/src/dbnode/persist/fs/types.go b/src/dbnode/persist/fs/types.go
index 84f1c72fb4..20829bb8ea 100644
--- a/src/dbnode/persist/fs/types.go
+++ b/src/dbnode/persist/fs/types.go
@@ -597,7 +597,7 @@ type BlockRetrieverOptions interface {
// ForEachRemainingFn is the function that is run on each of the remaining
// series of the merge target that did not intersect with the fileset.
-type ForEachRemainingFn func(seriesMetadata doc.Document, data block.FetchBlockResult) error
+type ForEachRemainingFn func(seriesMetadata doc.Metadata, data block.FetchBlockResult) error
// MergeWith is an interface that the fs merger uses to merge data with.
type MergeWith interface {
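
With the signature change, a MergeWith callback now receives series metadata as doc.Metadata; a minimal hedged sketch of a ForEachRemainingFn (the print is illustrative):

fn := func(seriesMetadata doc.Metadata, data block.FetchBlockResult) error {
	// seriesMetadata describes a series on the merge target that did not
	// intersect the fileset; data carries its block readers.
	fmt.Printf("remaining series %s with %d block reader(s)\n",
		seriesMetadata.ID, len(data.Blocks))
	return nil
}
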
diff --git a/src/dbnode/persist/persist_mock.go b/src/dbnode/persist/persist_mock.go
index 1ec52dcd02..351250cf06 100644
--- a/src/dbnode/persist/persist_mock.go
+++ b/src/dbnode/persist/persist_mock.go
@@ -341,3 +341,17 @@ func (mr *MockOnFlushSeriesMockRecorder) OnFlushNewSeries(arg0 interface{}) *gom
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnFlushNewSeries", reflect.TypeOf((*MockOnFlushSeries)(nil).OnFlushNewSeries), arg0)
}
+
+// CheckpointAndMaybeCompact mocks base method
+func (m *MockOnFlushSeries) CheckpointAndMaybeCompact() error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "CheckpointAndMaybeCompact")
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// CheckpointAndMaybeCompact indicates an expected call of CheckpointAndMaybeCompact
+func (mr *MockOnFlushSeriesMockRecorder) CheckpointAndMaybeCompact() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CheckpointAndMaybeCompact", reflect.TypeOf((*MockOnFlushSeries)(nil).CheckpointAndMaybeCompact))
+}
diff --git a/src/dbnode/persist/types.go b/src/dbnode/persist/types.go
index 4b4b195b53..84ca58a4bc 100644
--- a/src/dbnode/persist/types.go
+++ b/src/dbnode/persist/types.go
@@ -40,7 +40,7 @@ var errReusableTagIteratorRequired = errors.New("reusable tags iterator is requi
// Metadata is metadata for a time series, it can
// have several underlying sources.
type Metadata struct {
- metadata doc.Document
+ metadata doc.Metadata
id ident.ID
tags ident.Tags
tagsIter ident.TagIterator
@@ -55,11 +55,11 @@ type MetadataOptions struct {
}
// NewMetadata returns a new metadata struct from series metadata.
-// Note: because doc.Document has no pools for finalization we do not
+// Note: because doc.Metadata has no pools for finalization we do not
// take MetadataOptions here, in future if we have pools or
// some other shared options that Metadata needs we will add it to this
// constructor as well.
-func NewMetadata(metadata doc.Document) Metadata {
+func NewMetadata(metadata doc.Metadata) Metadata {
return Metadata{metadata: metadata}
}
@@ -336,7 +336,7 @@ const (
type SeriesMetadataType uint8
const (
- // SeriesDocumentType means the metadata is in doc.Document form.
+ // SeriesDocumentType means the metadata is in doc.Metadata form.
SeriesDocumentType SeriesMetadataType = iota
// SeriesIDAndEncodedTagsType means the metadata is in IDAndEncodedTags form.
SeriesIDAndEncodedTagsType
@@ -351,7 +351,7 @@ type IDAndEncodedTags struct {
// SeriesMetadata captures different representations of series metadata and
// the ownership status of the underlying memory.
type SeriesMetadata struct {
- Document doc.Document
+ Document doc.Metadata
IDAndEncodedTags IDAndEncodedTags
Type SeriesMetadataType
LifeTime SeriesMetadataLifeTime
@@ -366,13 +366,21 @@ type OnFlushNewSeriesEvent struct {
}
// OnFlushSeries performs work on a per series level.
+// Also exposes a checkpoint fn for potentially compacting multiple index segments based on size.
type OnFlushSeries interface {
OnFlushNewSeries(OnFlushNewSeriesEvent) error
+
+ // CheckpointAndMaybeCompact checks to see if we're at maximum cardinality
+ // for any index segments we're currently building and compact if we are.
+ CheckpointAndMaybeCompact() error
}
// NoOpColdFlushNamespace is a no-op impl of OnFlushSeries.
type NoOpColdFlushNamespace struct{}
+// CheckpointAndMaybeCompact is a no-op.
+func (n *NoOpColdFlushNamespace) CheckpointAndMaybeCompact() error { return nil }
+
// OnFlushNewSeries is a no-op.
func (n *NoOpColdFlushNamespace) OnFlushNewSeries(event OnFlushNewSeriesEvent) error {
return nil
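
Implementers of OnFlushSeries now need the extra method; a hedged sketch of a custom implementation that embeds the no-op to stay compatible as the interface grows (the type name is illustrative):

type customColdFlush struct {
	persist.NoOpColdFlushNamespace // inherits the CheckpointAndMaybeCompact no-op
}

func (c *customColdFlush) OnFlushNewSeries(ev persist.OnFlushNewSeriesEvent) error {
	// Inspect ev here; only the hooks you care about need overriding.
	return nil
}
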
diff --git a/src/dbnode/storage/bootstrap/bootstrapper/fs/source.go b/src/dbnode/storage/bootstrap/bootstrapper/fs/source.go
index ea17ffbaef..8848be7ed3 100644
--- a/src/dbnode/storage/bootstrap/bootstrapper/fs/source.go
+++ b/src/dbnode/storage/bootstrap/bootstrapper/fs/source.go
@@ -721,9 +721,9 @@ func (s *fileSystemSource) readNextEntryAndRecordBlock(
func (s *fileSystemSource) readNextEntryAndMaybeIndex(
r fs.DataFileSetReader,
- batch []doc.Document,
+ batch []doc.Metadata,
builder *result.IndexBuilder,
-) ([]doc.Document, error) {
+) ([]doc.Metadata, error) {
// If performing index run, then simply read the metadata and add to segment.
id, tagsIter, _, _, err := r.ReadMetadata()
if err != nil {
diff --git a/src/dbnode/storage/bootstrap/bootstrapper/peers/source.go b/src/dbnode/storage/bootstrap/bootstrapper/peers/source.go
index f5bc15c9b6..bb433278a7 100644
--- a/src/dbnode/storage/bootstrap/bootstrapper/peers/source.go
+++ b/src/dbnode/storage/bootstrap/bootstrapper/peers/source.go
@@ -1159,9 +1159,9 @@ func (s *peersSource) processReaders(
func (s *peersSource) readNextEntryAndMaybeIndex(
r fs.DataFileSetReader,
- batch []doc.Document,
+ batch []doc.Metadata,
builder *result.IndexBuilder,
-) ([]doc.Document, error) {
+) ([]doc.Metadata, error) {
// If performing index run, then simply read the metadata and add to segment.
id, tagsIter, _, _, err := r.ReadMetadata()
if err != nil {
diff --git a/src/dbnode/storage/bootstrap/result/result_index.go b/src/dbnode/storage/bootstrap/result/result_index.go
index 1a398139a1..34cb726d76 100644
--- a/src/dbnode/storage/bootstrap/result/result_index.go
+++ b/src/dbnode/storage/bootstrap/result/result_index.go
@@ -91,7 +91,7 @@ func NewIndexBuilder(builder segment.DocumentsBuilder) *IndexBuilder {
}
// FlushBatch flushes a batch of documents to the underlying segment builder.
-func (b *IndexBuilder) FlushBatch(batch []doc.Document) ([]doc.Document, error) {
+func (b *IndexBuilder) FlushBatch(batch []doc.Metadata) ([]doc.Metadata, error) {
if len(batch) == 0 {
// Last flush might not have any docs enqueued
return batch, nil
@@ -119,7 +119,7 @@ func (b *IndexBuilder) FlushBatch(batch []doc.Document) ([]doc.Document, error)
}
// Reset docs batch for reuse
- var empty doc.Document
+ var empty doc.Metadata
for i := range batch {
batch[i] = empty
}
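
FlushBatch resets every entry before handing the slice back, so callers can reuse one batch across flushes. A hedged sketch of the accumulate-then-flush loop used by the bootstrap sources (the batch capacity and docs source are illustrative):

batch := make([]doc.Metadata, 0, 256) // illustrative capacity
for _, d := range docs {              // docs: metadata read from filesets
	batch = append(batch, d)
	if len(batch) >= cap(batch) {
		var err error
		if batch, err = builder.FlushBatch(batch); err != nil {
			return err
		}
		batch = batch[:0] // defensive reset; entries are already zeroed
	}
}
// Flush any remainder; an empty batch is a no-op.
if _, err := builder.FlushBatch(batch); err != nil {
	return err
}
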
diff --git a/src/dbnode/storage/fs_merge_with_mem_test.go b/src/dbnode/storage/fs_merge_with_mem_test.go
index 3e982e1321..0680cb75b7 100644
--- a/src/dbnode/storage/fs_merge_with_mem_test.go
+++ b/src/dbnode/storage/fs_merge_with_mem_test.go
@@ -172,7 +172,7 @@ func TestForEachRemaining(t *testing.T) {
mergeWith := newFSMergeWithMem(shard, retriever, dirtySeries, dirtySeriesToWrite)
- var forEachCalls []doc.Document
+ var forEachCalls []doc.Metadata
shard.EXPECT().
FetchBlocksForColdFlush(gomock.Any(), ident.NewIDMatcher("id0"),
xtime.UnixNano(0).ToTime(), version+1, gomock.Any()).
@@ -181,10 +181,11 @@ func TestForEachRemaining(t *testing.T) {
FetchBlocksForColdFlush(gomock.Any(), ident.NewIDMatcher("id1"),
xtime.UnixNano(0).ToTime(), version+1, gomock.Any()).
Return(result, nil)
- mergeWith.ForEachRemaining(ctx, 0, func(seriesMetadata doc.Document, result block.FetchBlockResult) error {
+ err := mergeWith.ForEachRemaining(ctx, 0, func(seriesMetadata doc.Metadata, result block.FetchBlockResult) error {
forEachCalls = append(forEachCalls, seriesMetadata)
return nil
}, nsCtx)
+ require.NoError(t, err)
require.Len(t, forEachCalls, 2)
assert.Equal(t, id0.Bytes(), forEachCalls[0].ID)
assert.Equal(t, id1.Bytes(), forEachCalls[1].ID)
@@ -209,7 +210,7 @@ func TestForEachRemaining(t *testing.T) {
FetchBlocksForColdFlush(gomock.Any(), ident.NewIDMatcher("id4"),
xtime.UnixNano(1).ToTime(), version+1, gomock.Any()).
Return(result, nil)
- err = mergeWith.ForEachRemaining(ctx, 1, func(seriesMetadata doc.Document, result block.FetchBlockResult) error {
+ err = mergeWith.ForEachRemaining(ctx, 1, func(seriesMetadata doc.Metadata, result block.FetchBlockResult) error {
forEachCalls = append(forEachCalls, seriesMetadata)
return nil
}, nsCtx)
@@ -224,7 +225,7 @@ func TestForEachRemaining(t *testing.T) {
Return(result, nil)
// Test call with bad function execution.
- err = mergeWith.ForEachRemaining(ctx, 4, func(seriesMetadata doc.Document, result block.FetchBlockResult) error {
+ err = mergeWith.ForEachRemaining(ctx, 4, func(seriesMetadata doc.Metadata, result block.FetchBlockResult) error {
return errors.New("bad")
}, nsCtx)
assert.Error(t, err)
@@ -241,7 +242,7 @@ func addDirtySeries(
seriesList = newIDList(nil)
dirtySeriesToWrite[start] = seriesList
}
- element := seriesList.PushBack(doc.Document{ID: id.Bytes()})
+ element := seriesList.PushBack(doc.Metadata{ID: id.Bytes()})
dirtySeries.Set(idAndBlockStart{blockStart: start, id: id.Bytes()}, element)
}
diff --git a/src/dbnode/storage/id_list_gen.go b/src/dbnode/storage/id_list_gen.go
index c106afce10..a453d9f7d2 100644
--- a/src/dbnode/storage/id_list_gen.go
+++ b/src/dbnode/storage/id_list_gen.go
@@ -99,7 +99,7 @@ type idElement struct {
list *idList
// The value stored with this element.
- Value doc.Document
+ Value doc.Metadata
}
// Next returns the next list element or nil.
@@ -198,7 +198,7 @@ func (l *idList) insert(e, at *idElement) *idElement {
}
// insertValue is a convenience wrapper for inserting using the list's pool.
-func (l *idList) insertValue(v doc.Document, at *idElement) *idElement {
+func (l *idList) insertValue(v doc.Metadata, at *idElement) *idElement {
e := l.Pool.get()
e.Value = v
return l.insert(e, at)
@@ -218,7 +218,7 @@ func (l *idList) remove(e *idElement) *idElement {
// Remove removes e from l if e is an element of list l.
// It returns the element value e.Value.
// The element must not be nil.
-func (l *idList) Remove(e *idElement) doc.Document {
+func (l *idList) Remove(e *idElement) doc.Metadata {
if e.list == l {
// if e.list == l, l must have been initialized when e was inserted
// in l or l == nil (e is a zero Element) and l.remove will crash.
@@ -229,13 +229,13 @@ func (l *idList) Remove(e *idElement) doc.Document {
}
// PushFront inserts a new element e with value v at the front of list l and returns e.
-func (l *idList) PushFront(v doc.Document) *idElement {
+func (l *idList) PushFront(v doc.Metadata) *idElement {
l.lazyInit()
return l.insertValue(v, &l.root)
}
// PushBack inserts a new element e with value v at the back of list l and returns e.
-func (l *idList) PushBack(v doc.Document) *idElement {
+func (l *idList) PushBack(v doc.Metadata) *idElement {
l.lazyInit()
return l.insertValue(v, l.root.prev)
}
@@ -243,7 +243,7 @@ func (l *idList) PushBack(v doc.Document) *idElement {
// InsertBefore inserts a new element e with value v immediately before mark and returns e.
// If mark is not an element of l, the list is not modified.
// The mark must not be nil.
-func (l *idList) InsertBefore(v doc.Document, mark *idElement) *idElement {
+func (l *idList) InsertBefore(v doc.Metadata, mark *idElement) *idElement {
if mark.list != l {
return nil
}
@@ -254,7 +254,7 @@ func (l *idList) InsertBefore(v doc.Document, mark *idElement) *idElement {
// InsertAfter inserts a new element e with value v immediately after mark and returns e.
// If mark is not an element of l, the list is not modified.
// The mark must not be nil.
-func (l *idList) InsertAfter(v doc.Document, mark *idElement) *idElement {
+func (l *idList) InsertAfter(v doc.Metadata, mark *idElement) *idElement {
if mark.list != l {
return nil
}
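
id_list_gen.go is generated code that specializes the stdlib `container/list` pattern to a concrete element type, so the rename flows mechanically through every method signature. The generated API behaves like `container/list`, except values are stored as `doc.Metadata` directly (no `interface{}` boxing) and elements come from a pool. A rough stdlib equivalent, for orientation only:

```go
// Standalone analog of the generated idList API using container/list.
// idList/idElement in the hunk above behave the same way but store
// doc.Metadata values directly and draw elements from a pool.
package main

import (
	"container/list"
	"fmt"
)

func main() {
	l := list.New()
	e := l.PushBack("series-0") // idList.PushBack(doc.Metadata{...}) in m3
	l.PushFront("series-1")
	removed := l.Remove(e) // idList.Remove returns the stored value
	fmt.Println(removed)
	for cur := l.Front(); cur != nil; cur = cur.Next() {
		fmt.Println(cur.Value)
	}
}
```
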
diff --git a/src/dbnode/storage/index.go b/src/dbnode/storage/index.go
index c97d717708..b1481c1c34 100644
--- a/src/dbnode/storage/index.go
+++ b/src/dbnode/storage/index.go
@@ -685,7 +685,7 @@ func (i *nsIndex) writeBatches(
// doc is valid. Add potential forward writes to the forwardWriteBatch.
batch.ForEach(
func(idx int, entry index.WriteBatchEntry,
- d doc.Document, _ index.WriteBatchEntryResult) {
+ d doc.Metadata, _ index.WriteBatchEntryResult) {
total++
if len(i.doNotIndexWithFields) != 0 {
diff --git a/src/dbnode/storage/index/aggregate_results.go b/src/dbnode/storage/index/aggregate_results.go
index 01c1c3eadc..adfe613600 100644
--- a/src/dbnode/storage/index/aggregate_results.go
+++ b/src/dbnode/storage/index/aggregate_results.go
@@ -104,7 +104,7 @@ func (r *aggregatedResults) Reset(
r.Unlock()
}
-func (r *aggregatedResults) AddDocuments(batch []doc.Document) (int, int, error) {
+func (r *aggregatedResults) AddDocuments(batch []doc.Metadata) (int, int, error) {
r.Lock()
err := r.addDocumentsBatchWithLock(batch)
size := r.resultsMap.Len()
@@ -162,7 +162,7 @@ func (r *aggregatedResults) AddFields(batch []AggregateResultsEntry) (int, int)
}
func (r *aggregatedResults) addDocumentsBatchWithLock(
- batch []doc.Document,
+ batch []doc.Metadata,
) error {
for _, doc := range batch {
switch r.aggregateOpts.Type {
@@ -193,7 +193,7 @@ func (r *aggregatedResults) addDocumentsBatchWithLock(
}
func (r *aggregatedResults) addDocumentTermsWithLock(
- document doc.Document,
+ document doc.Metadata,
) error {
for _, field := range document.Fields {
if err := r.addTermWithLock(field.Name); err != nil {
@@ -233,7 +233,7 @@ func (r *aggregatedResults) addTermWithLock(
}
func (r *aggregatedResults) addDocumentWithLock(
- document doc.Document,
+ document doc.Metadata,
) error {
for _, field := range document.Fields {
if err := r.addFieldWithLock(field.Name, field.Value); err != nil {
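
Both aggregation paths above walk a document's fields: the terms-only path records just the field names, while the default path also records each value under its name. A standalone sketch of that walk, with illustrative local types standing in for `doc.Metadata` and the results map:

```go
// Minimal sketch of the aggregation walk above, using illustrative local
// types rather than the m3 ones.
package main

import "fmt"

type field struct{ Name, Value string }
type metadata struct{ Fields []field }

func aggregate(docs []metadata, termsOnly bool) map[string]map[string]struct{} {
	out := make(map[string]map[string]struct{})
	for _, d := range docs {
		for _, f := range d.Fields {
			if _, ok := out[f.Name]; !ok {
				out[f.Name] = make(map[string]struct{})
			}
			if !termsOnly { // the AggregateTagNames mode skips values entirely
				out[f.Name][f.Value] = struct{}{}
			}
		}
	}
	return out
}

func main() {
	docs := []metadata{{Fields: []field{{"fruit", "banana"}, {"color", "yellow"}}}}
	fmt.Println(aggregate(docs, false))
}
```
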
diff --git a/src/dbnode/storage/index/aggregate_results_test.go b/src/dbnode/storage/index/aggregate_results_test.go
index 32a311f28c..4a92a9f900 100644
--- a/src/dbnode/storage/index/aggregate_results_test.go
+++ b/src/dbnode/storage/index/aggregate_results_test.go
@@ -32,7 +32,7 @@ import (
"github.com/stretchr/testify/require"
)
-func genDoc(strs ...string) doc.Document {
+func genDoc(strs ...string) doc.Metadata {
if len(strs)%2 != 0 {
panic("invalid test setup; need even str length")
}
@@ -45,15 +45,15 @@ func genDoc(strs ...string) doc.Document {
}
}
- return doc.Document{Fields: fields}
+ return doc.Metadata{Fields: fields}
}
func TestAggResultsInsertInvalid(t *testing.T) {
res := NewAggregateResults(nil, AggregateResultsOptions{}, testOpts)
assert.True(t, res.EnforceLimits())
- dInvalid := doc.Document{Fields: []doc.Field{{}}}
- size, docsCount, err := res.AddDocuments([]doc.Document{dInvalid})
+ dInvalid := doc.Metadata{Fields: []doc.Field{{}}}
+ size, docsCount, err := res.AddDocuments([]doc.Metadata{dInvalid})
require.Error(t, err)
require.Equal(t, 0, size)
require.Equal(t, 1, docsCount)
@@ -62,7 +62,7 @@ func TestAggResultsInsertInvalid(t *testing.T) {
require.Equal(t, 1, res.TotalDocsCount())
dInvalid = genDoc("", "foo")
- size, docsCount, err = res.AddDocuments([]doc.Document{dInvalid})
+ size, docsCount, err = res.AddDocuments([]doc.Metadata{dInvalid})
require.Error(t, err)
require.Equal(t, 0, size)
require.Equal(t, 2, docsCount)
@@ -74,7 +74,7 @@ func TestAggResultsInsertInvalid(t *testing.T) {
func TestAggResultsInsertEmptyTermValue(t *testing.T) {
res := NewAggregateResults(nil, AggregateResultsOptions{}, testOpts)
dValidEmptyTerm := genDoc("foo", "")
- size, docsCount, err := res.AddDocuments([]doc.Document{dValidEmptyTerm})
+ size, docsCount, err := res.AddDocuments([]doc.Metadata{dValidEmptyTerm})
require.NoError(t, err)
require.Equal(t, 1, size)
require.Equal(t, 1, docsCount)
@@ -87,7 +87,7 @@ func TestAggResultsInsertBatchOfTwo(t *testing.T) {
res := NewAggregateResults(nil, AggregateResultsOptions{}, testOpts)
d1 := genDoc("d1", "")
d2 := genDoc("d2", "")
- size, docsCount, err := res.AddDocuments([]doc.Document{d1, d2})
+ size, docsCount, err := res.AddDocuments([]doc.Metadata{d1, d2})
require.NoError(t, err)
require.Equal(t, 2, size)
require.Equal(t, 2, docsCount)
@@ -100,8 +100,8 @@ func TestAggResultsTermOnlyInsert(t *testing.T) {
res := NewAggregateResults(nil, AggregateResultsOptions{
Type: AggregateTagNames,
}, testOpts)
- dInvalid := doc.Document{Fields: []doc.Field{{}}}
- size, docsCount, err := res.AddDocuments([]doc.Document{dInvalid})
+ dInvalid := doc.Metadata{Fields: []doc.Field{{}}}
+ size, docsCount, err := res.AddDocuments([]doc.Metadata{dInvalid})
require.Error(t, err)
require.Equal(t, 0, size)
require.Equal(t, 1, docsCount)
@@ -110,7 +110,7 @@ func TestAggResultsTermOnlyInsert(t *testing.T) {
require.Equal(t, 1, res.TotalDocsCount())
dInvalid = genDoc("", "foo")
- size, docsCount, err = res.AddDocuments([]doc.Document{dInvalid})
+ size, docsCount, err = res.AddDocuments([]doc.Metadata{dInvalid})
require.Error(t, err)
require.Equal(t, 0, size)
require.Equal(t, 2, docsCount)
@@ -119,7 +119,7 @@ func TestAggResultsTermOnlyInsert(t *testing.T) {
require.Equal(t, 2, res.TotalDocsCount())
valid := genDoc("foo", "")
- size, docsCount, err = res.AddDocuments([]doc.Document{valid})
+ size, docsCount, err = res.AddDocuments([]doc.Metadata{valid})
require.NoError(t, err)
require.Equal(t, 1, size)
require.Equal(t, 3, docsCount)
@@ -130,7 +130,7 @@ func TestAggResultsTermOnlyInsert(t *testing.T) {
func testAggResultsInsertIdempotency(t *testing.T, res AggregateResults) {
dValid := genDoc("foo", "bar")
- size, docsCount, err := res.AddDocuments([]doc.Document{dValid})
+ size, docsCount, err := res.AddDocuments([]doc.Metadata{dValid})
require.NoError(t, err)
require.Equal(t, 1, size)
require.Equal(t, 1, docsCount)
@@ -138,7 +138,7 @@ func testAggResultsInsertIdempotency(t *testing.T, res AggregateResults) {
require.Equal(t, 1, res.Size())
require.Equal(t, 1, res.TotalDocsCount())
- size, docsCount, err = res.AddDocuments([]doc.Document{dValid})
+ size, docsCount, err = res.AddDocuments([]doc.Metadata{dValid})
require.NoError(t, err)
require.Equal(t, 1, size)
require.Equal(t, 2, docsCount)
@@ -164,7 +164,7 @@ func TestInvalidAggregateType(t *testing.T) {
Type: 100,
}, testOpts)
dValid := genDoc("foo", "bar")
- size, docsCount, err := res.AddDocuments([]doc.Document{dValid})
+ size, docsCount, err := res.AddDocuments([]doc.Metadata{dValid})
require.Error(t, err)
require.Equal(t, 0, size)
require.Equal(t, 1, docsCount)
@@ -173,7 +173,7 @@ func TestInvalidAggregateType(t *testing.T) {
func TestAggResultsSameName(t *testing.T) {
res := NewAggregateResults(nil, AggregateResultsOptions{}, testOpts)
d1 := genDoc("foo", "bar")
- size, docsCount, err := res.AddDocuments([]doc.Document{d1})
+ size, docsCount, err := res.AddDocuments([]doc.Metadata{d1})
require.NoError(t, err)
require.Equal(t, 1, size)
require.Equal(t, 1, docsCount)
@@ -185,7 +185,7 @@ func TestAggResultsSameName(t *testing.T) {
assert.True(t, aggVals.Map().Contains(ident.StringID("bar")))
d2 := genDoc("foo", "biz")
- size, docsCount, err = res.AddDocuments([]doc.Document{d2})
+ size, docsCount, err = res.AddDocuments([]doc.Metadata{d2})
require.NoError(t, err)
require.Equal(t, 1, size)
require.Equal(t, 2, docsCount)
@@ -212,7 +212,7 @@ func TestAggResultsTermOnlySameName(t *testing.T) {
Type: AggregateTagNames,
}, testOpts)
d1 := genDoc("foo", "bar")
- size, docsCount, err := res.AddDocuments([]doc.Document{d1})
+ size, docsCount, err := res.AddDocuments([]doc.Metadata{d1})
require.NoError(t, err)
require.Equal(t, 1, size)
require.Equal(t, 1, docsCount)
@@ -223,7 +223,7 @@ func TestAggResultsTermOnlySameName(t *testing.T) {
assertNoValuesInNameOnlyAggregate(t, aggVals)
d2 := genDoc("foo", "biz")
- size, docsCount, err = res.AddDocuments([]doc.Document{d2})
+ size, docsCount, err = res.AddDocuments([]doc.Metadata{d2})
require.NoError(t, err)
require.Equal(t, 1, size)
require.Equal(t, 2, docsCount)
@@ -235,20 +235,20 @@ func TestAggResultsTermOnlySameName(t *testing.T) {
}
func addMultipleDocuments(t *testing.T, res AggregateResults) (int, int) {
- _, _, err := res.AddDocuments([]doc.Document{
+ _, _, err := res.AddDocuments([]doc.Metadata{
genDoc("foo", "bar"),
genDoc("fizz", "bar"),
genDoc("buzz", "bar"),
})
require.NoError(t, err)
- _, _, err = res.AddDocuments([]doc.Document{
+ _, _, err = res.AddDocuments([]doc.Metadata{
genDoc("foo", "biz"),
genDoc("fizz", "bar"),
})
require.NoError(t, err)
- size, docsCount, err := res.AddDocuments([]doc.Document{
+ size, docsCount, err := res.AddDocuments([]doc.Metadata{
genDoc("foo", "baz", "buzz", "bag", "qux", "qaz"),
})
@@ -378,7 +378,7 @@ func TestAggResultsInsertCopies(t *testing.T) {
dValid := genDoc("foo", "bar")
name := dValid.Fields[0].Name
value := dValid.Fields[0].Value
- size, docsCount, err := res.AddDocuments([]doc.Document{dValid})
+ size, docsCount, err := res.AddDocuments([]doc.Metadata{dValid})
require.NoError(t, err)
require.Equal(t, 1, size)
require.Equal(t, 1, docsCount)
@@ -420,7 +420,7 @@ func TestAggResultsNameOnlyInsertCopies(t *testing.T) {
}, testOpts)
dValid := genDoc("foo", "bar")
name := dValid.Fields[0].Name
- size, docsCount, err := res.AddDocuments([]doc.Document{dValid})
+ size, docsCount, err := res.AddDocuments([]doc.Metadata{dValid})
require.NoError(t, err)
require.Equal(t, 1, size)
require.Equal(t, 1, docsCount)
@@ -450,7 +450,7 @@ func TestAggResultsReset(t *testing.T) {
res := NewAggregateResults(ident.StringID("qux"),
AggregateResultsOptions{}, testOpts)
d1 := genDoc("foo", "bar")
- size, docsCount, err := res.AddDocuments([]doc.Document{d1})
+ size, docsCount, err := res.AddDocuments([]doc.Metadata{d1})
require.NoError(t, err)
require.Equal(t, 1, size)
require.Equal(t, 1, docsCount)
@@ -500,7 +500,7 @@ func TestAggResultFinalize(t *testing.T) {
// Create a Results and insert some data.
res := NewAggregateResults(nil, AggregateResultsOptions{}, testOpts)
d1 := genDoc("foo", "bar")
- size, docsCount, err := res.AddDocuments([]doc.Document{d1})
+ size, docsCount, err := res.AddDocuments([]doc.Metadata{d1})
require.NoError(t, err)
require.Equal(t, 1, size)
require.Equal(t, 1, docsCount)
diff --git a/src/dbnode/storage/index/block.go b/src/dbnode/storage/index/block.go
index cbb641e6e5..94892d232f 100644
--- a/src/dbnode/storage/index/block.go
+++ b/src/dbnode/storage/index/block.go
@@ -533,9 +533,9 @@ func (b *block) closeAsync(closer io.Closer) {
func (b *block) addQueryResults(
cancellable *xresource.CancellableLifetime,
results BaseResults,
- batch []doc.Document,
+ batch []doc.Metadata,
source []byte,
-) ([]doc.Document, int, int, error) {
+) ([]doc.Metadata, int, int, error) {
// update recently queried docs to monitor memory.
if results.EnforceLimits() {
if err := b.docsLimit.Inc(len(batch), source); err != nil {
@@ -557,7 +557,7 @@ func (b *block) addQueryResults(
cancellable.ReleaseCheckout()
// reset batch.
- var emptyDoc doc.Document
+ var emptyDoc doc.Metadata
for i := range batch {
batch[i] = emptyDoc
}
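
addQueryResults increments the documents limit by the batch size before handing the batch to the results, so an over-limit query fails fast instead of continuing to accumulate memory. A toy version of that check, with a plain counter standing in for m3's limit implementation:

```go
// Sketch of the limit check performed before adding a batch of query
// results; the counter here is an illustrative stand-in for m3's limit.
package main

import (
	"errors"
	"fmt"
)

type docsLimit struct{ cur, max int }

func (l *docsLimit) Inc(n int) error {
	l.cur += n
	if l.cur > l.max {
		return errors.New("query limit exceeded")
	}
	return nil
}

func addQueryResults(limit *docsLimit, results, batch []string) ([]string, error) {
	// Update recently queried docs to monitor memory, as in the hunk above.
	if err := limit.Inc(len(batch)); err != nil {
		return results, err
	}
	return append(results, batch...), nil
}

func main() {
	limit := &docsLimit{max: 3}
	results, err := addQueryResults(limit, nil, []string{"a", "b"})
	fmt.Println(results, err) // [a b] <nil>
	_, err = addQueryResults(limit, results, []string{"c", "d"})
	fmt.Println(err) // query limit exceeded
}
```
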
diff --git a/src/dbnode/storage/index/block_bench_test.go b/src/dbnode/storage/index/block_bench_test.go
index 180b5048d3..ec742a977a 100644
--- a/src/dbnode/storage/index/block_bench_test.go
+++ b/src/dbnode/storage/index/block_bench_test.go
@@ -78,7 +78,7 @@ func BenchmarkBlockWrite(b *testing.B) {
Timestamp: now,
OnIndexSeries: onIndexSeries,
EnqueuedAt: now,
- }, doc.Document{
+ }, doc.Metadata{
ID: []byte(fmt.Sprintf("doc.%d", i)),
Fields: fields,
})
diff --git a/src/dbnode/storage/index/block_test.go b/src/dbnode/storage/index/block_test.go
index e8f4e64ee4..67f8710f9e 100644
--- a/src/dbnode/storage/index/block_test.go
+++ b/src/dbnode/storage/index/block_test.go
@@ -113,7 +113,7 @@ func TestBlockWriteAfterClose(t *testing.T) {
batch.Append(WriteBatchEntry{
Timestamp: nowNotBlockStartAligned,
OnIndexSeries: lifecycle,
- }, doc.Document{})
+ }, doc.Metadata{})
res, err := b.WriteBatch(batch)
require.Error(t, err)
@@ -124,7 +124,7 @@ func TestBlockWriteAfterClose(t *testing.T) {
batch.ForEach(func(
idx int,
entry WriteBatchEntry,
- doc doc.Document,
+ doc doc.Metadata,
result WriteBatchEntryResult,
) {
verified++
@@ -162,7 +162,7 @@ func TestBlockWriteAfterSeal(t *testing.T) {
batch.Append(WriteBatchEntry{
Timestamp: nowNotBlockStartAligned,
OnIndexSeries: lifecycle,
- }, doc.Document{})
+ }, doc.Metadata{})
res, err := b.WriteBatch(batch)
require.Error(t, err)
@@ -173,7 +173,7 @@ func TestBlockWriteAfterSeal(t *testing.T) {
batch.ForEach(func(
idx int,
entry WriteBatchEntry,
- doc doc.Document,
+ doc doc.Metadata,
result WriteBatchEntryResult,
) {
verified++
@@ -270,7 +270,7 @@ func TestBlockWriteActualSegmentPartialFailure(t *testing.T) {
batch.Append(WriteBatchEntry{
Timestamp: nowNotBlockStartAligned,
OnIndexSeries: h2,
- }, doc.Document{})
+ }, doc.Metadata{})
res, err := b.WriteBatch(batch)
require.Error(t, err)
require.Equal(t, int64(1), res.NumSuccess)
@@ -280,7 +280,7 @@ func TestBlockWriteActualSegmentPartialFailure(t *testing.T) {
batch.ForEach(func(
idx int,
entry WriteBatchEntry,
- _ doc.Document,
+ _ doc.Metadata,
result WriteBatchEntryResult,
) {
verified++
@@ -331,7 +331,7 @@ func TestBlockWritePartialFailure(t *testing.T) {
batch.Append(WriteBatchEntry{
Timestamp: nowNotBlockStartAligned,
OnIndexSeries: h2,
- }, doc.Document{})
+ }, doc.Metadata{})
res, err := b.WriteBatch(batch)
require.Error(t, err)
@@ -342,7 +342,7 @@ func TestBlockWritePartialFailure(t *testing.T) {
batch.ForEach(func(
idx int,
entry WriteBatchEntry,
- doc doc.Document,
+ doc doc.Metadata,
result WriteBatchEntryResult,
) {
verified++
@@ -857,7 +857,7 @@ func TestBlockMockQueryMergeResultsMapLimit(t *testing.T) {
limit := 1
results := NewQueryResults(nil,
QueryResultsOptions{SizeLimit: limit}, testOpts)
- _, _, err = results.AddDocuments([]doc.Document{testDoc1()})
+ _, _, err = results.AddDocuments([]doc.Metadata{testDoc1()})
require.NoError(t, err)
dIter := doc.NewMockIterator(ctrl)
@@ -908,7 +908,7 @@ func TestBlockMockQueryMergeResultsDupeID(t *testing.T) {
}
results := NewQueryResults(nil, QueryResultsOptions{}, testOpts)
- _, _, err = results.AddDocuments([]doc.Document{testDoc1()})
+ _, _, err = results.AddDocuments([]doc.Metadata{testDoc1()})
require.NoError(t, err)
dIter := doc.NewMockIterator(ctrl)
@@ -2146,7 +2146,7 @@ func assertAggregateResultsMapEquals(t *testing.T, expected map[string][]string,
}
}
-func testSegment(t *testing.T, docs ...doc.Document) segment.Segment {
+func testSegment(t *testing.T, docs ...doc.Metadata) segment.Segment {
seg, err := mem.NewSegment(testOpts.MemSegmentOptions())
require.NoError(t, err)
@@ -2158,8 +2158,8 @@ func testSegment(t *testing.T, docs ...doc.Document) segment.Segment {
return seg
}
-func testDoc1() doc.Document {
- return doc.Document{
+func testDoc1() doc.Metadata {
+ return doc.Metadata{
ID: []byte("foo"),
Fields: []doc.Field{
doc.Field{
@@ -2170,8 +2170,8 @@ func testDoc1() doc.Document {
}
}
-func testDoc1DupeID() doc.Document {
- return doc.Document{
+func testDoc1DupeID() doc.Metadata {
+ return doc.Metadata{
ID: []byte("foo"),
Fields: []doc.Field{
doc.Field{
@@ -2186,8 +2186,8 @@ func testDoc1DupeID() doc.Document {
}
}
-func testDoc2() doc.Document {
- return doc.Document{
+func testDoc2() doc.Metadata {
+ return doc.Metadata{
ID: []byte("something"),
Fields: []doc.Field{
doc.Field{
@@ -2202,8 +2202,8 @@ func testDoc2() doc.Document {
}
}
-func testDoc3() doc.Document {
- return doc.Document{
+func testDoc3() doc.Metadata {
+ return doc.Metadata{
ID: []byte("bar"),
Fields: []doc.Field{
doc.Field{
diff --git a/src/dbnode/storage/index/compaction/compactor.go b/src/dbnode/storage/index/compaction/compactor.go
index 288d63ef35..7ec097fcc0 100644
--- a/src/dbnode/storage/index/compaction/compactor.go
+++ b/src/dbnode/storage/index/compaction/compactor.go
@@ -179,7 +179,7 @@ func (c *Compactor) CompactUsingBuilder(
}
// Reset docs batch for reuse
- var empty doc.Document
+ var empty doc.Metadata
for i := range batch {
batch[i] = empty
}
@@ -273,7 +273,7 @@ func (c *Compactor) compactFromBuilderWithLock(
// If retaining references to the original docs, simply take ownership
// of the documents and then reference them directly from the FST segment
// rather than encoding them and mmap'ing the encoded documents.
- allDocsCopy := make([]doc.Document, len(allDocs))
+ allDocsCopy := make([]doc.Metadata, len(allDocs))
copy(allDocsCopy, allDocs)
fstData.DocsReader = docs.NewSliceReader(allDocsCopy)
} else {
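
The `allDocsCopy` above exists because the builder reuses its docs slice across compactions; a segment that wants to reference the documents directly must own its own copy of the slice. In miniature, with an illustrative type:

```go
// Why the copy above matters: the builder resets its docs slice between
// compactions, so a segment referencing the documents directly must copy
// the slice first. The metadata type here is an illustrative stand-in.
package main

import "fmt"

type metadata struct{ ID string }

func main() {
	builderDocs := []metadata{{"a"}, {"b"}}

	owned := make([]metadata, len(builderDocs))
	copy(owned, builderDocs)

	builderDocs[0] = metadata{} // builder resets its batch for reuse
	fmt.Println(owned[0].ID)    // still "a": the copy is unaffected
}
```
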
diff --git a/src/dbnode/storage/index/compaction/compactor_test.go b/src/dbnode/storage/index/compaction/compactor_test.go
index 7a40e12b7b..631d8c892b 100644
--- a/src/dbnode/storage/index/compaction/compactor_test.go
+++ b/src/dbnode/storage/index/compaction/compactor_test.go
@@ -40,28 +40,28 @@ var (
testMemSegmentOptions = mem.NewOptions()
testBuilderSegmentOptions = builder.NewOptions()
- testDocuments = []doc.Document{
- doc.Document{
+ testDocuments = []doc.Metadata{
+ {
ID: []byte("one"),
Fields: []doc.Field{
- doc.Field{
+ {
Name: []byte("fruit"),
Value: []byte("banana"),
},
- doc.Field{
+ {
Name: []byte("color"),
Value: []byte("yellow"),
},
},
},
- doc.Document{
+ {
ID: []byte("two"),
Fields: []doc.Field{
- doc.Field{
+ {
Name: []byte("fruit"),
Value: []byte("apple"),
},
- doc.Field{
+ {
Name: []byte("color"),
Value: []byte("red"),
},
@@ -189,7 +189,7 @@ func TestCompactorCompactDuplicateIDsNoError(t *testing.T) {
require.NoError(t, compactor.Close())
}
-func assertContents(t *testing.T, seg segment.Segment, docs []doc.Document) {
+func assertContents(t *testing.T, seg segment.Segment, docs []doc.Metadata) {
// Ensure has contents
require.Equal(t, int64(len(docs)), seg.Size())
reader, err := seg.Reader()
diff --git a/src/dbnode/storage/index/convert/convert.go b/src/dbnode/storage/index/convert/convert.go
index effb5aad1c..e6013362ee 100644
--- a/src/dbnode/storage/index/convert/convert.go
+++ b/src/dbnode/storage/index/convert/convert.go
@@ -48,7 +48,7 @@ var (
)
// Validate returns a bool indicating whether the document is valid.
-func Validate(d doc.Document) error {
+func Validate(d doc.Metadata) error {
if !utf8.Valid(d.ID) {
return fmt.Errorf("document has invalid non-UTF8 ID: id=%v, id_hex=%x",
d.ID, d.ID)
@@ -107,7 +107,7 @@ func ValidateSeriesTag(tag ident.Tag) error {
}
// FromSeriesIDAndTags converts the provided series id+tags into a document.
-func FromSeriesIDAndTags(id ident.ID, tags ident.Tags) (doc.Document, error) {
+func FromSeriesIDAndTags(id ident.ID, tags ident.Tags) (doc.Metadata, error) {
clonedID := clone(id)
fields := make([]doc.Field, 0, len(tags.Values()))
for _, tag := range tags.Values() {
@@ -131,18 +131,18 @@ func FromSeriesIDAndTags(id ident.ID, tags ident.Tags) (doc.Document, error) {
})
}
- d := doc.Document{
+ d := doc.Metadata{
ID: clonedID,
Fields: fields,
}
if err := Validate(d); err != nil {
- return doc.Document{}, err
+ return doc.Metadata{}, err
}
return d, nil
}
// FromSeriesIDAndTagIter converts the provided series id+tags into a document.
-func FromSeriesIDAndTagIter(id ident.ID, tags ident.TagIterator) (doc.Document, error) {
+func FromSeriesIDAndTagIter(id ident.ID, tags ident.TagIterator) (doc.Metadata, error) {
clonedID := clone(id)
fields := make([]doc.Field, 0, tags.Remaining())
for tags.Next() {
@@ -167,15 +167,15 @@ func FromSeriesIDAndTagIter(id ident.ID, tags ident.TagIterator) (doc.Document,
})
}
if err := tags.Err(); err != nil {
- return doc.Document{}, err
+ return doc.Metadata{}, err
}
- d := doc.Document{
+ d := doc.Metadata{
ID: clonedID,
Fields: fields,
}
if err := Validate(d); err != nil {
- return doc.Document{}, err
+ return doc.Metadata{}, err
}
return d, nil
}
@@ -283,7 +283,7 @@ func (o Opts) wrapBytes(b []byte) ident.ID {
}
// ToSeries converts the provided doc to metric id+tags.
-func ToSeries(d doc.Document, opts Opts) (ident.ID, ident.TagIterator, error) {
+func ToSeries(d doc.Metadata, opts Opts) (ident.ID, ident.TagIterator, error) {
if len(d.ID) == 0 {
return nil, nil, errInvalidResultMissingID
}
@@ -291,11 +291,11 @@ func ToSeries(d doc.Document, opts Opts) (ident.ID, ident.TagIterator, error) {
}
// ToSeriesTags converts the provided doc to metric tags.
-func ToSeriesTags(d doc.Document, opts Opts) ident.TagIterator {
+func ToSeriesTags(d doc.Metadata, opts Opts) ident.TagIterator {
return newTagIter(d, opts)
}
-// tagIter exposes an ident.TagIterator interface over a doc.Document.
+// tagIter exposes an ident.TagIterator interface over a doc.Metadata.
type tagIter struct {
docFields doc.Fields
@@ -310,7 +310,7 @@ type tagIter struct {
// NB: force tagIter to implement the ident.TagIterator interface.
var _ ident.TagIterator = &tagIter{}
-func newTagIter(d doc.Document, opts Opts) ident.TagIterator {
+func newTagIter(d doc.Metadata, opts Opts) ident.TagIterator {
return &tagIter{
docFields: d.Fields,
currentIdx: -1,
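
FromSeriesIDAndTags and FromSeriesIDAndTagIter both clone the ID, turn each tag into a `doc.Field`, and validate the result before returning it. A self-contained sketch of that shape, with stand-in types and UTF-8 validation as the only check:

```go
// Standalone sketch of the id+tags to metadata conversion above. The types
// are illustrative stand-ins for ident.Tag / doc.Metadata, and only the
// UTF-8 ID check from Validate is reproduced.
package main

import (
	"fmt"
	"unicode/utf8"
)

type field struct{ Name, Value []byte }
type metadata struct {
	ID     []byte
	Fields []field
}

func fromSeriesIDAndTags(id []byte, tags map[string]string) (metadata, error) {
	fields := make([]field, 0, len(tags))
	for n, v := range tags {
		fields = append(fields, field{Name: []byte(n), Value: []byte(v)})
	}
	d := metadata{ID: append([]byte(nil), id...), Fields: fields} // clone the ID
	if !utf8.Valid(d.ID) {
		return metadata{}, fmt.Errorf("document has invalid non-UTF8 ID: id_hex=%x", d.ID)
	}
	return d, nil
}

func main() {
	d, err := fromSeriesIDAndTags([]byte("foo"), map[string]string{"bar": "baz"})
	fmt.Println(string(d.ID), len(d.Fields), err)
}
```
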
diff --git a/src/dbnode/storage/index/convert/convert_test.go b/src/dbnode/storage/index/convert/convert_test.go
index 932dc3c493..07c5d37873 100644
--- a/src/dbnode/storage/index/convert/convert_test.go
+++ b/src/dbnode/storage/index/convert/convert_test.go
@@ -94,7 +94,7 @@ func TestFromSeriesIDAndTagIterValid(t *testing.T) {
}
func TestToSeriesValid(t *testing.T) {
- d := doc.Document{
+ d := doc.Metadata{
ID: []byte("foo"),
Fields: []doc.Field{
doc.Field{Name: []byte("bar"), Value: []byte("baz")},
@@ -140,7 +140,7 @@ func TestTagsFromTagsIterNoPool(t *testing.T) {
}
func TestToSeriesInvalidID(t *testing.T) {
- d := doc.Document{
+ d := doc.Metadata{
Fields: []doc.Field{
doc.Field{Name: []byte("bar"), Value: []byte("baz")},
},
@@ -150,7 +150,7 @@ func TestToSeriesInvalidID(t *testing.T) {
}
func TestToSeriesInvalidTag(t *testing.T) {
- d := doc.Document{
+ d := doc.Metadata{
ID: []byte("foo"),
Fields: []doc.Field{
doc.Field{Name: convert.ReservedFieldNameID, Value: []byte("baz")},
diff --git a/src/dbnode/storage/index/fields_terms_iterator_test.go b/src/dbnode/storage/index/fields_terms_iterator_test.go
index b1ae28c8dd..706d49d68e 100644
--- a/src/dbnode/storage/index/fields_terms_iterator_test.go
+++ b/src/dbnode/storage/index/fields_terms_iterator_test.go
@@ -64,11 +64,13 @@ func TestFieldsTermsIteratorSimple(t *testing.T) {
func TestFieldsTermsIteratorReuse(t *testing.T) {
pairs := []pair{
- pair{"a", "b"}, pair{"a", "c"},
- pair{"d", "e"}, pair{"d", "f"},
- pair{"g", "h"},
- pair{"i", "j"},
- pair{"k", "l"},
+ {"a", "b"},
+ {"a", "c"},
+ {"d", "e"},
+ {"d", "f"},
+ {"g", "h"},
+ {"i", "j"},
+ {"k", "l"},
}
iter, err := newFieldsAndTermsIterator(nil, fieldsAndTermsIteratorOpts{})
@@ -91,9 +93,10 @@ func TestFieldsTermsIteratorReuse(t *testing.T) {
require.NoError(t, err)
slice := toSlice(t, iter)
requireSlicesEqual(t, []pair{
- pair{"d", "e"}, pair{"d", "f"},
- pair{"g", "h"},
- pair{"i", "j"},
+ {"d", "e"},
+ {"d", "f"},
+ {"g", "h"},
+ {"i", "j"},
}, slice)
err = iter.Reset(reader, fieldsAndTermsIteratorOpts{
@@ -105,18 +108,21 @@ func TestFieldsTermsIteratorReuse(t *testing.T) {
require.NoError(t, err)
slice = toSlice(t, iter)
requireSlicesEqual(t, []pair{
- pair{"a", "b"}, pair{"a", "c"},
- pair{"k", "l"},
+ {"a", "b"},
+ {"a", "c"},
+ {"k", "l"},
}, slice)
}
func TestFieldsTermsIteratorSimpleSkip(t *testing.T) {
input := []pair{
- pair{"a", "b"}, pair{"a", "c"},
- pair{"d", "e"}, pair{"d", "f"},
- pair{"g", "h"},
- pair{"i", "j"},
- pair{"k", "l"},
+ {"a", "b"},
+ {"a", "c"},
+ {"d", "e"},
+ {"d", "f"},
+ {"g", "h"},
+ {"i", "j"},
+ {"k", "l"},
}
s := newFieldsTermsIterSetup(input...)
reader, err := s.asSegment(t).Reader()
@@ -131,16 +137,19 @@ func TestFieldsTermsIteratorSimpleSkip(t *testing.T) {
require.NoError(t, err)
slice := toSlice(t, iter)
requireSlicesEqual(t, []pair{
- pair{"d", "e"}, pair{"d", "f"},
- pair{"g", "h"},
- pair{"i", "j"},
+ {"d", "e"},
+ {"d", "f"},
+ {"g", "h"},
+ {"i", "j"},
}, slice)
}
func TestFieldsTermsIteratorTermsOnly(t *testing.T) {
s := newFieldsTermsIterSetup(
- pair{"a", "b"}, pair{"a", "c"},
- pair{"d", "e"}, pair{"d", "f"},
+ pair{"a", "b"},
+ pair{"a", "c"},
+ pair{"d", "e"},
+ pair{"d", "f"},
pair{"g", "h"},
pair{"i", "j"},
pair{"k", "l"},
@@ -152,7 +161,11 @@ func TestFieldsTermsIteratorTermsOnly(t *testing.T) {
require.NoError(t, err)
slice := toSlice(t, iter)
requireSlicesEqual(t, []pair{
- pair{"a", ""}, pair{"d", ""}, pair{"g", ""}, pair{"i", ""}, pair{"k", ""},
+ {"a", ""},
+ {"d", ""},
+ {"g", ""},
+ {"i", ""},
+ {"k", ""},
}, slice)
}
@@ -166,7 +179,7 @@ func TestFieldsTermsIteratorEmptyTerm(t *testing.T) {
iter, err := newFieldsAndTermsIterator(reader, fieldsAndTermsIteratorOpts{iterateTerms: false})
require.NoError(t, err)
slice := toSlice(t, iter)
- requireSlicesEqual(t, []pair{pair{"a", ""}}, slice)
+ requireSlicesEqual(t, []pair{{"a", ""}}, slice)
}
func TestFieldsTermsIteratorEmptyTermInclude(t *testing.T) {
@@ -183,38 +196,38 @@ func TestFieldsTermsIteratorEmptyTermInclude(t *testing.T) {
}
func TestFieldsTermsIteratorIterateTermsAndRestrictByQuery(t *testing.T) {
- testDocs := []doc.Document{
- doc.Document{
+ testDocs := []doc.Metadata{
+ {
Fields: []doc.Field{
- doc.Field{
+ {
Name: []byte("fruit"),
Value: []byte("banana"),
},
- doc.Field{
+ {
Name: []byte("color"),
Value: []byte("yellow"),
},
},
},
- doc.Document{
+ {
Fields: []doc.Field{
- doc.Field{
+ {
Name: []byte("fruit"),
Value: []byte("apple"),
},
- doc.Field{
+ {
Name: []byte("color"),
Value: []byte("red"),
},
},
},
- doc.Document{
+ {
Fields: []doc.Field{
- doc.Field{
+ {
Name: []byte("fruit"),
Value: []byte("pineapple"),
},
- doc.Field{
+ {
Name: []byte("color"),
Value: []byte("yellow"),
},
@@ -250,10 +263,10 @@ func TestFieldsTermsIteratorIterateTermsAndRestrictByQuery(t *testing.T) {
require.NoError(t, err)
slice := toSlice(t, iter)
requireSlicesEqual(t, []pair{
- pair{"color", "red"},
- pair{"color", "yellow"},
- pair{"fruit", "apple"},
- pair{"fruit", "pineapple"},
+ {"color", "red"},
+ {"color", "yellow"},
+ {"fruit", "apple"},
+ {"fruit", "pineapple"},
}, slice)
}
@@ -382,12 +395,12 @@ type fieldsTermsIterSetup struct {
}
func (s *fieldsTermsIterSetup) asSegment(t *testing.T) segment.Segment {
- docs := make([]doc.Document, 0, len(s.fields))
+ docs := make([]doc.Metadata, 0, len(s.fields))
for _, f := range s.fields {
- docs = append(docs, doc.Document{
+ docs = append(docs, doc.Metadata{
ID: []byte(fmt.Sprintf("id_%v_%v", f.Name, f.Value)),
Fields: []doc.Field{
- doc.Field{
+ {
Name: []byte(f.Name),
Value: []byte(f.Value),
},
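
Most of the churn in this test file is composite-literal simplification, consistent with `gofmt -s`: inside a slice literal with a named element type, the per-element type can be elided. Both forms compile to the same thing:

```go
// The two literal styles from the hunk above, side by side; the simplified
// form is the one the formatter prefers.
package main

import "fmt"

type pair struct{ Name, Value string }

func main() {
	verbose := []pair{pair{"a", "b"}, pair{"a", "c"}}
	simplified := []pair{{"a", "b"}, {"a", "c"}}
	fmt.Println(verbose[0] == simplified[0]) // true
}
```
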
diff --git a/src/dbnode/storage/index/for_each_test.go b/src/dbnode/storage/index/for_each_test.go
index 9b5286926e..046f060c23 100644
--- a/src/dbnode/storage/index/for_each_test.go
+++ b/src/dbnode/storage/index/for_each_test.go
@@ -41,8 +41,8 @@ func TestWriteBatchForEachUnmarkedBatchByBlockStart(t *testing.T) {
nDur := time.Duration(n)
return now.Add(nDur * blockSize).Add(nDur * time.Minute)
}
- d := func(n int64) doc.Document {
- return doc.Document{
+ d := func(n int64) doc.Metadata {
+ return doc.Metadata{
ID: []byte(fmt.Sprintf("doc-%d", n)),
}
}
@@ -88,8 +88,8 @@ func TestWriteBatchForEachUnmarkedBatchByBlockStartMore(t *testing.T) {
nDur := time.Duration(n)
return now.Add(nDur * blockSize).Add(nDur * time.Minute)
}
- d := func(n int64) doc.Document {
- return doc.Document{
+ d := func(n int64) doc.Metadata {
+ return doc.Metadata{
ID: []byte(fmt.Sprintf("doc-%d", n)),
}
}
diff --git a/src/dbnode/storage/index/index_mock.go b/src/dbnode/storage/index/index_mock.go
index d98d3c231b..41e14ef338 100644
--- a/src/dbnode/storage/index/index_mock.go
+++ b/src/dbnode/storage/index/index_mock.go
@@ -129,7 +129,7 @@ func (mr *MockBaseResultsMockRecorder) EnforceLimits() *gomock.Call {
}
// AddDocuments mocks base method
-func (m *MockBaseResults) AddDocuments(batch []doc.Document) (int, int, error) {
+func (m *MockBaseResults) AddDocuments(batch []doc.Metadata) (int, int, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "AddDocuments", batch)
ret0, _ := ret[0].(int)
@@ -236,7 +236,7 @@ func (mr *MockQueryResultsMockRecorder) EnforceLimits() *gomock.Call {
}
// AddDocuments mocks base method
-func (m *MockQueryResults) AddDocuments(batch []doc.Document) (int, int, error) {
+func (m *MockQueryResults) AddDocuments(batch []doc.Metadata) (int, int, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "AddDocuments", batch)
ret0, _ := ret[0].(int)
@@ -430,7 +430,7 @@ func (mr *MockAggregateResultsMockRecorder) EnforceLimits() *gomock.Call {
}
// AddDocuments mocks base method
-func (m *MockAggregateResults) AddDocuments(batch []doc.Document) (int, int, error) {
+func (m *MockAggregateResults) AddDocuments(batch []doc.Metadata) (int, int, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "AddDocuments", batch)
ret0, _ := ret[0].(int)
diff --git a/src/dbnode/storage/index/options.go b/src/dbnode/storage/index/options.go
index 3ff3e9dd1e..2c5ca34829 100644
--- a/src/dbnode/storage/index/options.go
+++ b/src/dbnode/storage/index/options.go
@@ -40,7 +40,7 @@ const (
// defaultIndexInsertMode sets the default indexing mode to synchronous.
defaultIndexInsertMode = InsertSync
- // documentArrayPool size in general: 256*256*sizeof(doc.Document)
+ // documentArrayPool size in general: 256*256*sizeof(doc.Metadata)
// = 256 * 256 * 16
// = 1mb (but with Go's heap probably 2mb)
// TODO(r): Make this configurable in a followup change.
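
The sizing comment above is straightforward arithmetic; checking it (16 bytes is the comment's per-entry estimate, and the real struct is larger on 64-bit platforms, so treat the result as a floor):

```go
// Verifying the pool-sizing comment: 256 slices of 256 entries at the
// comment's 16-byte per-entry estimate is exactly 1 MiB.
package main

import "fmt"

func main() {
	const slices, entries, entryBytes = 256, 256, 16
	total := slices * entries * entryBytes
	fmt.Printf("%d bytes = %d KiB = %d MiB\n", total, total/1024, total/(1024*1024))
	// 1048576 bytes = 1024 KiB = 1 MiB
}
```
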
diff --git a/src/dbnode/storage/index/read_through_segment.go b/src/dbnode/storage/index/read_through_segment.go
index 52aaaf6251..2de3fb368c 100644
--- a/src/dbnode/storage/index/read_through_segment.go
+++ b/src/dbnode/storage/index/read_through_segment.go
@@ -263,7 +263,7 @@ func (s *readThroughSegmentReader) AllDocs() (index.IDDocIterator, error) {
}
// Doc is a pass-through call, since there's no postings list to cache.
-func (s *readThroughSegmentReader) Doc(id postings.ID) (doc.Document, error) {
+func (s *readThroughSegmentReader) Doc(id postings.ID) (doc.Metadata, error) {
return s.reader.Doc(id)
}
diff --git a/src/dbnode/storage/index/results.go b/src/dbnode/storage/index/results.go
index b0c4372db3..c4d09520e1 100644
--- a/src/dbnode/storage/index/results.go
+++ b/src/dbnode/storage/index/results.go
@@ -104,7 +104,7 @@ func (r *results) Reset(nsID ident.ID, opts QueryResultsOptions) {
// NB: If documents with duplicate IDs are added, they are simply ignored and
// the first document added with that ID is returned.
-func (r *results) AddDocuments(batch []doc.Document) (int, int, error) {
+func (r *results) AddDocuments(batch []doc.Metadata) (int, int, error) {
r.Lock()
err := r.addDocumentsBatchWithLock(batch)
size := r.resultsMap.Len()
@@ -114,7 +114,7 @@ func (r *results) AddDocuments(batch []doc.Document) (int, int, error) {
return size, docsCount, err
}
-func (r *results) addDocumentsBatchWithLock(batch []doc.Document) error {
+func (r *results) addDocumentsBatchWithLock(batch []doc.Metadata) error {
for i := range batch {
_, size, err := r.addDocumentWithLock(batch[i])
if err != nil {
@@ -128,7 +128,7 @@ func (r *results) addDocumentsBatchWithLock(batch []doc.Document) error {
return nil
}
-func (r *results) addDocumentWithLock(d doc.Document) (bool, int, error) {
+func (r *results) addDocumentWithLock(d doc.Metadata) (bool, int, error) {
if len(d.ID) == 0 {
return false, r.resultsMap.Len(), errUnableToAddResultMissingID
}
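
addDocumentWithLock gives QueryResults its first-insert-wins semantics: an empty ID is an error, and a duplicate ID is a silent no-op. The same logic over a plain map, as an illustrative stand-in for the results map:

```go
// First-insert-wins in miniature: a doc whose ID is already present is
// ignored rather than overwriting the existing entry, matching the NB
// comment above. Types are illustrative.
package main

import (
	"errors"
	"fmt"
)

type metadata struct{ ID, Value string }

func addDocument(m map[string]metadata, d metadata) (added bool, size int, err error) {
	if d.ID == "" {
		return false, len(m), errors.New("unable to add result: missing ID")
	}
	if _, ok := m[d.ID]; ok {
		return false, len(m), nil // duplicate: first insert wins
	}
	m[d.ID] = d
	return true, len(m), nil
}

func main() {
	m := make(map[string]metadata)
	fmt.Println(addDocument(m, metadata{ID: "abc", Value: "first"}))
	fmt.Println(addDocument(m, metadata{ID: "abc", Value: "second"}))
	fmt.Println(m["abc"].Value) // first
}
```
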
diff --git a/src/dbnode/storage/index/results_test.go b/src/dbnode/storage/index/results_test.go
index b058aad9c1..3dc91b5947 100644
--- a/src/dbnode/storage/index/results_test.go
+++ b/src/dbnode/storage/index/results_test.go
@@ -58,8 +58,8 @@ func TestResultsInsertInvalid(t *testing.T) {
res := NewQueryResults(nil, QueryResultsOptions{}, testOpts)
assert.True(t, res.EnforceLimits())
- dInvalid := doc.Document{ID: nil}
- size, docsCount, err := res.AddDocuments([]doc.Document{dInvalid})
+ dInvalid := doc.Metadata{ID: nil}
+ size, docsCount, err := res.AddDocuments([]doc.Metadata{dInvalid})
require.Error(t, err)
require.Equal(t, 0, size)
require.Equal(t, 1, docsCount)
@@ -70,8 +70,8 @@ func TestResultsInsertInvalid(t *testing.T) {
func TestResultsInsertIdempotency(t *testing.T) {
res := NewQueryResults(nil, QueryResultsOptions{}, testOpts)
- dValid := doc.Document{ID: []byte("abc")}
- size, docsCount, err := res.AddDocuments([]doc.Document{dValid})
+ dValid := doc.Metadata{ID: []byte("abc")}
+ size, docsCount, err := res.AddDocuments([]doc.Metadata{dValid})
require.NoError(t, err)
require.Equal(t, 1, size)
require.Equal(t, 1, docsCount)
@@ -79,7 +79,7 @@ func TestResultsInsertIdempotency(t *testing.T) {
require.Equal(t, 1, res.Size())
require.Equal(t, 1, res.TotalDocsCount())
- size, docsCount, err = res.AddDocuments([]doc.Document{dValid})
+ size, docsCount, err = res.AddDocuments([]doc.Metadata{dValid})
require.NoError(t, err)
require.Equal(t, 1, size)
require.Equal(t, 2, docsCount)
@@ -90,9 +90,9 @@ func TestResultsInsertIdempotency(t *testing.T) {
func TestResultsInsertBatchOfTwo(t *testing.T) {
res := NewQueryResults(nil, QueryResultsOptions{}, testOpts)
- d1 := doc.Document{ID: []byte("d1")}
- d2 := doc.Document{ID: []byte("d2")}
- size, docsCount, err := res.AddDocuments([]doc.Document{d1, d2})
+ d1 := doc.Metadata{ID: []byte("d1")}
+ d2 := doc.Metadata{ID: []byte("d2")}
+ size, docsCount, err := res.AddDocuments([]doc.Metadata{d1, d2})
require.NoError(t, err)
require.Equal(t, 2, size)
require.Equal(t, 2, docsCount)
@@ -103,8 +103,8 @@ func TestResultsInsertBatchOfTwo(t *testing.T) {
func TestResultsFirstInsertWins(t *testing.T) {
res := NewQueryResults(nil, QueryResultsOptions{}, testOpts)
- d1 := doc.Document{ID: []byte("abc")}
- size, docsCount, err := res.AddDocuments([]doc.Document{d1})
+ d1 := doc.Metadata{ID: []byte("abc")}
+ size, docsCount, err := res.AddDocuments([]doc.Metadata{d1})
require.NoError(t, err)
require.Equal(t, 1, size)
require.Equal(t, 1, docsCount)
@@ -116,11 +116,15 @@ func TestResultsFirstInsertWins(t *testing.T) {
require.True(t, ok)
require.Equal(t, 0, tags.Remaining())
- d2 := doc.Document{ID: []byte("abc"),
+ d2 := doc.Metadata{
+ ID: []byte("abc"),
Fields: doc.Fields{
- doc.Field{Name: []byte("foo"), Value: []byte("bar")},
+ doc.Field{
+ Name: []byte("foo"),
+ Value: []byte("bar"),
+ },
}}
- size, docsCount, err = res.AddDocuments([]doc.Document{d2})
+ size, docsCount, err = res.AddDocuments([]doc.Metadata{d2})
require.NoError(t, err)
require.Equal(t, 1, size)
require.Equal(t, 2, docsCount)
@@ -135,8 +139,8 @@ func TestResultsFirstInsertWins(t *testing.T) {
func TestResultsInsertContains(t *testing.T) {
res := NewQueryResults(nil, QueryResultsOptions{}, testOpts)
- dValid := doc.Document{ID: []byte("abc")}
- size, docsCount, err := res.AddDocuments([]doc.Document{dValid})
+ dValid := doc.Metadata{ID: []byte("abc")}
+ size, docsCount, err := res.AddDocuments([]doc.Metadata{dValid})
require.NoError(t, err)
require.Equal(t, 1, size)
require.Equal(t, 1, docsCount)
@@ -148,10 +152,10 @@ func TestResultsInsertContains(t *testing.T) {
func TestResultsInsertDoesNotCopy(t *testing.T) {
res := NewQueryResults(nil, QueryResultsOptions{}, testOpts)
- dValid := doc.Document{ID: []byte("abc"), Fields: []doc.Field{
+ dValid := doc.Metadata{ID: []byte("abc"), Fields: []doc.Field{
{Name: []byte("name"), Value: []byte("value")},
}}
- size, docsCount, err := res.AddDocuments([]doc.Document{dValid})
+ size, docsCount, err := res.AddDocuments([]doc.Metadata{dValid})
require.NoError(t, err)
require.Equal(t, 1, size)
require.Equal(t, 1, docsCount)
@@ -195,8 +199,8 @@ func TestResultsInsertDoesNotCopy(t *testing.T) {
func TestResultsReset(t *testing.T) {
res := NewQueryResults(nil, QueryResultsOptions{}, testOpts)
- d1 := doc.Document{ID: []byte("abc")}
- size, docsCount, err := res.AddDocuments([]doc.Document{d1})
+ d1 := doc.Metadata{ID: []byte("abc")}
+ size, docsCount, err := res.AddDocuments([]doc.Metadata{d1})
require.NoError(t, err)
require.Equal(t, 1, size)
require.Equal(t, 1, docsCount)
@@ -229,8 +233,8 @@ func TestResultsResetNamespaceClones(t *testing.T) {
func TestFinalize(t *testing.T) {
// Create a Results and insert some data.
res := NewQueryResults(nil, QueryResultsOptions{}, testOpts)
- d1 := doc.Document{ID: []byte("abc")}
- size, docsCount, err := res.AddDocuments([]doc.Document{d1})
+ d1 := doc.Metadata{ID: []byte("abc")}
+ size, docsCount, err := res.AddDocuments([]doc.Metadata{d1})
require.NoError(t, err)
require.Equal(t, 1, size)
require.Equal(t, 1, docsCount)
diff --git a/src/dbnode/storage/index/types.go b/src/dbnode/storage/index/types.go
index 8dd26d6025..a37b7a6849 100644
--- a/src/dbnode/storage/index/types.go
+++ b/src/dbnode/storage/index/types.go
@@ -164,7 +164,7 @@ type BaseResults interface {
// modified after this function returns without affecting the results map.
// TODO(r): We will need to change this behavior once index fields are
// mutable and the most recent need to shadow older entries.
- AddDocuments(batch []doc.Document) (size, docsCount int, err error)
+ AddDocuments(batch []doc.Metadata) (size, docsCount int, err error)
// Finalize releases any resources held by the Results object,
// including returning it to a backing pool.
@@ -512,7 +512,7 @@ type WriteBatch struct {
sortBy writeBatchSortBy
entries []WriteBatchEntry
- docs []doc.Document
+ docs []doc.Metadata
}
type writeBatchSortBy uint
@@ -533,14 +533,14 @@ func NewWriteBatch(opts WriteBatchOptions) *WriteBatch {
return &WriteBatch{
opts: opts,
entries: make([]WriteBatchEntry, 0, opts.InitialCapacity),
- docs: make([]doc.Document, 0, opts.InitialCapacity),
+ docs: make([]doc.Metadata, 0, opts.InitialCapacity),
}
}
// Append appends an entry with accompanying document.
func (b *WriteBatch) Append(
entry WriteBatchEntry,
- doc doc.Document,
+ doc doc.Metadata,
) {
// Append just using the result from the current entry
b.appendWithResult(entry, doc, &entry.resultVal)
@@ -562,7 +562,7 @@ func (b *WriteBatch) AppendAll(from *WriteBatch) {
func (b *WriteBatch) appendWithResult(
entry WriteBatchEntry,
- doc doc.Document,
+ doc doc.Metadata,
result *WriteBatchEntryResult,
) {
// Set private WriteBatchEntry fields
@@ -579,7 +579,7 @@ func (b *WriteBatch) appendWithResult(
type ForEachWriteBatchEntryFn func(
idx int,
entry WriteBatchEntry,
- doc doc.Document,
+ doc doc.Metadata,
result WriteBatchEntryResult,
)
@@ -677,7 +677,7 @@ func (b *WriteBatch) numPending() int {
}
// PendingDocs returns all the docs in this batch that are unmarked.
-func (b *WriteBatch) PendingDocs() []doc.Document {
+func (b *WriteBatch) PendingDocs() []doc.Metadata {
b.SortByUnmarkedAndIndexBlockStart() // Ensure sorted by unmarked first
return b.docs[:b.numPending()]
}
@@ -707,7 +707,7 @@ func (b *WriteBatch) Reset() {
b.entries[i] = entryZeroed
}
b.entries = b.entries[:0]
- var docZeroed doc.Document
+ var docZeroed doc.Metadata
for i := range b.docs {
b.docs[i] = docZeroed
}
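
WriteBatch keeps `entries` and `docs` as parallel slices, which is why the rename touches Append, ForEach, PendingDocs, and Reset together. A minimal sketch of that parallel-slice shape, with stand-in types:

```go
// Sketch of the WriteBatch shape above: entries and docs are appended in
// lockstep, and ForEach hands the callback matching pairs by index. The
// types here are illustrative stand-ins for the m3 ones.
package main

import "fmt"

type entry struct{ Timestamp int64 }
type metadata struct{ ID string }

type writeBatch struct {
	entries []entry
	docs    []metadata
}

func (b *writeBatch) Append(e entry, d metadata) {
	b.entries = append(b.entries, e)
	b.docs = append(b.docs, d)
}

func (b *writeBatch) ForEach(fn func(idx int, e entry, d metadata)) {
	for i := range b.entries {
		fn(i, b.entries[i], b.docs[i])
	}
}

func main() {
	var b writeBatch
	b.Append(entry{1}, metadata{"doc-1"})
	b.Append(entry{2}, metadata{"doc-2"})
	b.ForEach(func(i int, e entry, d metadata) {
		fmt.Println(i, e.Timestamp, d.ID)
	})
}
```
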
diff --git a/src/dbnode/storage/index/wide_query_results.go b/src/dbnode/storage/index/wide_query_results.go
index 442fd158cb..4cf8a0011e 100644
--- a/src/dbnode/storage/index/wide_query_results.go
+++ b/src/dbnode/storage/index/wide_query_results.go
@@ -94,7 +94,7 @@ func (r *wideResults) EnforceLimits() bool {
return false
}
-func (r *wideResults) AddDocuments(batch []doc.Document) (int, int, error) {
+func (r *wideResults) AddDocuments(batch []doc.Metadata) (int, int, error) {
var size, totalDocsCount int
r.RLock()
size, totalDocsCount = r.size, r.totalDocsCount
@@ -124,7 +124,7 @@ func (r *wideResults) AddDocuments(batch []doc.Document) (int, int, error) {
return size, totalDocsCount, err
}
-func (r *wideResults) addDocumentsBatchWithLock(batch []doc.Document) error {
+func (r *wideResults) addDocumentsBatchWithLock(batch []doc.Metadata) error {
for i := range batch {
if err := r.addDocumentWithLock(batch[i]); err != nil {
return err
@@ -134,7 +134,7 @@ func (r *wideResults) addDocumentsBatchWithLock(batch []doc.Document) error {
return nil
}
-func (r *wideResults) addDocumentWithLock(d doc.Document) error {
+func (r *wideResults) addDocumentWithLock(d doc.Metadata) error {
if len(d.ID) == 0 {
return errUnableToAddResultMissingID
}
diff --git a/src/dbnode/storage/index/wide_query_results_test.go b/src/dbnode/storage/index/wide_query_results_test.go
index 414e24de01..2f57a729ee 100644
--- a/src/dbnode/storage/index/wide_query_results_test.go
+++ b/src/dbnode/storage/index/wide_query_results_test.go
@@ -52,16 +52,16 @@ func init() {
bytesPool.Init()
}
-func buildDocs(documentCount int, batchSize int) [][]doc.Document {
+func buildDocs(documentCount int, batchSize int) [][]doc.Metadata {
docBatches := int(math.Ceil(float64(documentCount) / float64(batchSize)))
- docs := make([][]doc.Document, 0, docBatches)
+ docs := make([][]doc.Metadata, 0, docBatches)
for i := 0; i < docBatches; i++ {
- batch := make([]doc.Document, 0, batchSize)
+ batch := make([]doc.Metadata, 0, batchSize)
for j := 0; j < batchSize; j++ {
val := i*batchSize + j
if val < documentCount {
val := fmt.Sprintf("foo%d", i*batchSize+j)
- batch = append(batch, doc.Document{
+ batch = append(batch, doc.Metadata{
ID: []byte(val),
})
}
@@ -73,7 +73,7 @@ func buildDocs(documentCount int, batchSize int) [][]doc.Document {
return docs
}
-func buildExpected(t *testing.T, docs [][]doc.Document) [][]string {
+func buildExpected(_ *testing.T, docs [][]doc.Metadata) [][]string {
expected := make([][]string, 0, len(docs))
for _, batch := range docs {
idBatch := make([]string, 0, len(batch))
diff --git a/src/dbnode/storage/index_block_test.go b/src/dbnode/storage/index_block_test.go
index a4dc04d9e5..53bade47c0 100644
--- a/src/dbnode/storage/index_block_test.go
+++ b/src/dbnode/storage/index_block_test.go
@@ -79,7 +79,7 @@ func testWriteBatchBlockSizeOption(blockSize time.Duration) testWriteBatchOption
func testWriteBatch(
e index.WriteBatchEntry,
- d doc.Document,
+ d doc.Metadata,
opts ...testWriteBatchOption,
) *index.WriteBatch {
var options index.WriteBatchOptions
@@ -96,8 +96,8 @@ func testWriteBatchEntry(
tags ident.Tags,
timestamp time.Time,
fns index.OnIndexSeries,
-) (index.WriteBatchEntry, doc.Document) {
- d := doc.Document{ID: copyBytes(id.Bytes())}
+) (index.WriteBatchEntry, doc.Metadata) {
+ d := doc.Metadata{ID: copyBytes(id.Bytes())}
for _, tag := range tags.Values() {
d.Fields = append(d.Fields, doc.Field{
Name: copyBytes(tag.Name.Bytes()),
@@ -245,7 +245,7 @@ func TestNamespaceIndexWrite(t *testing.T) {
Do(func(batch *index.WriteBatch) {
docs := batch.PendingDocs()
require.Equal(t, 1, len(docs))
- require.Equal(t, doc.Document{
+ require.Equal(t, doc.Metadata{
ID: id.Bytes(),
Fields: doc.Fields{{Name: tag.Name.Bytes(), Value: tag.Value.Bytes()}},
}, docs[0])
@@ -321,7 +321,7 @@ func TestNamespaceIndexWriteCreatesBlock(t *testing.T) {
Do(func(batch *index.WriteBatch) {
docs := batch.PendingDocs()
require.Equal(t, 1, len(docs))
- require.Equal(t, doc.Document{
+ require.Equal(t, doc.Metadata{
ID: id.Bytes(),
Fields: doc.Fields{{Name: tag.Name.Bytes(), Value: tag.Value.Bytes()}},
}, docs[0])
@@ -831,11 +831,12 @@ func TestLimits(t *testing.T) {
opts interface{},
results index.BaseResults,
logFields interface{}) (bool, error) {
- results.AddDocuments([]doc.Document{
+ _, _, err = results.AddDocuments([]doc.Metadata{
// Results in size=1 and docs=2.
- doc.Document{ID: []byte("A")},
- doc.Document{ID: []byte("A")},
+ {ID: []byte("A")},
+ {ID: []byte("A")},
})
+ require.NoError(t, err)
return false, nil
})
diff --git a/src/dbnode/storage/index_query_concurrent_test.go b/src/dbnode/storage/index_query_concurrent_test.go
index 0c41e5df02..d27d442a92 100644
--- a/src/dbnode/storage/index_query_concurrent_test.go
+++ b/src/dbnode/storage/index_query_concurrent_test.go
@@ -150,7 +150,7 @@ func testNamespaceIndexHighConcurrentQueries(
var (
idsPerBlock = 16
- expectedResults = make(map[string]doc.Document)
+ expectedResults = make(map[string]doc.Metadata)
blockStarts []time.Time
blockIdx = -1
)
@@ -180,7 +180,7 @@ func testNamespaceIndexHighConcurrentQueries(
})
for i := 0; i < idsPerBlock; i++ {
id := fmt.Sprintf("foo.block_%d.id_%d", blockIdx, i)
- doc := doc.Document{
+ doc := doc.Metadata{
ID: []byte(id),
Fields: []doc.Field{
{
diff --git a/src/dbnode/storage/index_queue_forward_write_test.go b/src/dbnode/storage/index_queue_forward_write_test.go
index 82a7dcb298..05ddd534e4 100644
--- a/src/dbnode/storage/index_queue_forward_write_test.go
+++ b/src/dbnode/storage/index_queue_forward_write_test.go
@@ -275,7 +275,7 @@ func setupMockBlock(
Do(func(batch *index.WriteBatch) {
docs := batch.PendingDocs()
require.Equal(t, 1, len(docs))
- require.Equal(t, doc.Document{
+ require.Equal(t, doc.Metadata{
ID: id.Bytes(),
Fields: doc.Fields{{Name: tag.Name.Bytes(), Value: tag.Value.Bytes()}},
}, docs[0])
diff --git a/src/dbnode/storage/index_queue_test.go b/src/dbnode/storage/index_queue_test.go
index 04c663d655..64d1e6bf1f 100644
--- a/src/dbnode/storage/index_queue_test.go
+++ b/src/dbnode/storage/index_queue_test.go
@@ -224,7 +224,7 @@ func TestNamespaceIndexInsertOlderThanRetentionPeriod(t *testing.T) {
batch.ForEach(func(
idx int,
entry index.WriteBatchEntry,
- doc doc.Document,
+ doc doc.Metadata,
result index.WriteBatchEntryResult,
) {
verified++
@@ -244,7 +244,7 @@ func TestNamespaceIndexInsertOlderThanRetentionPeriod(t *testing.T) {
batch.ForEach(func(
idx int,
entry index.WriteBatchEntry,
- doc doc.Document,
+ doc doc.Metadata,
result index.WriteBatchEntryResult,
) {
verified++
diff --git a/src/dbnode/storage/index_test.go b/src/dbnode/storage/index_test.go
index c258db46d8..b04bb5b82c 100644
--- a/src/dbnode/storage/index_test.go
+++ b/src/dbnode/storage/index_test.go
@@ -413,8 +413,8 @@ func verifyFlushForShards(
numBlocks int
persistClosedTimes int
persistCalledTimes int
- actualDocs = make([]doc.Document, 0)
- expectedDocs = make([]doc.Document, 0)
+ actualDocs = make([]doc.Metadata, 0)
+ expectedDocs = make([]doc.Metadata, 0)
)
// NB(bodu): Always align now w/ the index's view of now.
idx.nowFn = func() time.Time {
@@ -464,11 +464,11 @@ func verifyFlushForShards(
resultsID1 := ident.StringID("CACHED")
resultsID2 := ident.StringID("NEW")
- doc1 := doc.Document{
+ doc1 := doc.Metadata{
ID: resultsID1.Bytes(),
Fields: []doc.Field{},
}
- doc2 := doc.Document{
+ doc2 := doc.Metadata{
ID: resultsID2.Bytes(),
Fields: []doc.Field{},
}
@@ -495,7 +495,7 @@ func verifyFlushForShards(
results.EXPECT().Close()
mockShard.EXPECT().DocRef(resultsID1).Return(doc1, true, nil)
- mockShard.EXPECT().DocRef(resultsID2).Return(doc.Document{}, false, nil)
+ mockShard.EXPECT().DocRef(resultsID2).Return(doc.Metadata{}, false, nil)
mockShard.EXPECT().FetchBlocksMetadataV2(gomock.Any(), blockStart, blockStart.Add(idx.blockSize),
gomock.Any(), gomock.Any(), block.FetchBlocksMetadataOptions{OnlyDisk: true}).Return(results, nil, nil)
diff --git a/src/dbnode/storage/limits/errors.go b/src/dbnode/storage/limits/errors.go
index a876dd9b7a..d44ed882f7 100644
--- a/src/dbnode/storage/limits/errors.go
+++ b/src/dbnode/storage/limits/errors.go
@@ -39,10 +39,18 @@ func (err *queryLimitExceededError) Error() string {
// IsQueryLimitExceededError returns true if the error is a query limits exceeded error.
func IsQueryLimitExceededError(err error) bool {
+ //nolint:errorlint
for err != nil {
- if _, ok := err.(*queryLimitExceededError); ok { //nolint:errorlint
+ if _, ok := err.(*queryLimitExceededError); ok {
return true
}
+ if multiErr, ok := err.(xerrors.MultiError); ok {
+ for _, e := range multiErr.Errors() {
+ if IsQueryLimitExceededError(e) {
+ return true
+ }
+ }
+ }
err = xerrors.InnerError(err)
}
return false
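
The new branch makes IsQueryLimitExceededError recurse into a MultiError's children at every level of the unwrap chain, so a limit error buried inside an aggregate is still detected. A standalone sketch using stdlib errors in place of m3's xerrors:

```go
// Sketch of the traversal added above: walk the unwrap chain, and at each
// step also recurse into any aggregate error's children. Stdlib errors
// stand in for xerrors, so the shapes are illustrative.
package main

import (
	"errors"
	"fmt"
)

type limitExceeded struct{ msg string }

func (e *limitExceeded) Error() string { return e.msg }

type multiError struct{ errs []error }

func (m *multiError) Error() string { return fmt.Sprintf("%d errors", len(m.errs)) }

func isLimitExceeded(err error) bool {
	for err != nil {
		if _, ok := err.(*limitExceeded); ok {
			return true
		}
		if multi, ok := err.(*multiError); ok {
			for _, e := range multi.errs {
				if isLimitExceeded(e) {
					return true
				}
			}
		}
		err = errors.Unwrap(err)
	}
	return false
}

func main() {
	wrapped := fmt.Errorf("query failed: %w", &limitExceeded{"query limit exceeded"})
	fmt.Println(isLimitExceeded(&multiError{errs: []error{errors.New("other"), wrapped}})) // true
	fmt.Println(isLimitExceeded(errors.New("random")))                                     // false
}
```
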
diff --git a/src/dbnode/storage/limits/errors_test.go b/src/dbnode/storage/limits/errors_test.go
new file mode 100644
index 0000000000..751ba66192
--- /dev/null
+++ b/src/dbnode/storage/limits/errors_test.go
@@ -0,0 +1,96 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package limits
+
+import (
+ "errors"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ xerrors "github.com/m3db/m3/src/x/errors"
+)
+
+func TestIsQueryLimitExceededError(t *testing.T) {
+ randomErr := xerrors.NewNonRetryableError(errors.New("random error"))
+ limitExceededErr := NewQueryLimitExceededError("query limit exceeded")
+
+ tests := []struct {
+ name string
+ err error
+ expected bool
+ }{
+ {
+ "not query limit exceeded",
+ randomErr,
+ false,
+ },
+ {
+ "query limit exceeded",
+ limitExceededErr,
+ true,
+ },
+ {
+ "inner non query limit exceeded",
+ xerrors.NewInvalidParamsError(randomErr),
+ false,
+ },
+ {
+ "inner query limit exceeded",
+ xerrors.NewInvalidParamsError(limitExceededErr),
+ true,
+ },
+ {
+ "empty multi error",
+ multiError(),
+ false,
+ },
+ {
+ "multi error without query limit exceeded",
+ multiError(randomErr),
+ false,
+ },
+ {
+ "multi error with only query limit exceeded",
+ multiError(limitExceededErr),
+ true,
+ },
+ {
+ "multi error with query limit exceeded",
+ multiError(randomErr, xerrors.NewRetryableError(limitExceededErr)),
+ true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ assert.Equal(t, tt.expected, IsQueryLimitExceededError(tt.err))
+ })
+ }
+}
+
+func multiError(errs ...error) error {
+ multiErr := xerrors.NewMultiError()
+ for _, e := range errs {
+ multiErr = multiErr.Add(e)
+ }
+ return multiErr.FinalError()
+}
diff --git a/src/dbnode/storage/namespace.go b/src/dbnode/storage/namespace.go
index 4992f63c3c..9a7eb91ae0 100644
--- a/src/dbnode/storage/namespace.go
+++ b/src/dbnode/storage/namespace.go
@@ -1805,10 +1805,10 @@ func (n *dbNamespace) aggregateTiles(
return processedTileCount, nil
}
-func (n *dbNamespace) DocRef(id ident.ID) (doc.Document, bool, error) {
+func (n *dbNamespace) DocRef(id ident.ID) (doc.Metadata, bool, error) {
shard, _, err := n.readableShardFor(id)
if err != nil {
- return doc.Document{}, false, err
+ return doc.Metadata{}, false, err
}
return shard.DocRef(id)
}
diff --git a/src/dbnode/storage/series/buffer_test.go b/src/dbnode/storage/series/buffer_test.go
index 61141c8bae..3e7cc71064 100644
--- a/src/dbnode/storage/series/buffer_test.go
+++ b/src/dbnode/storage/series/buffer_test.go
@@ -1156,7 +1156,7 @@ func testBufferWithEmptyEncoder(t *testing.T, testSnapshot bool) {
return nil
}
- metadata := persist.NewMetadata(doc.Document{
+ metadata := persist.NewMetadata(doc.Metadata{
ID: []byte("some-id"),
})
@@ -1262,7 +1262,7 @@ func testBufferSnapshot(t *testing.T, opts Options, setAnn setAnnotation) {
}
// Perform a snapshot.
- metadata := persist.NewMetadata(doc.Document{
+ metadata := persist.NewMetadata(doc.Metadata{
ID: []byte("some-id"),
})
@@ -1409,7 +1409,7 @@ func TestBufferSnapshotWithColdWrites(t *testing.T) {
}
// Perform a snapshot.
- metadata := persist.NewMetadata(doc.Document{
+ metadata := persist.NewMetadata(doc.Metadata{
ID: []byte("some-id"),
})
diff --git a/src/dbnode/storage/series/lookup/entry_whitebox_test.go b/src/dbnode/storage/series/lookup/entry_whitebox_test.go
index 1e1d546cf5..fe4b8f2e93 100644
--- a/src/dbnode/storage/series/lookup/entry_whitebox_test.go
+++ b/src/dbnode/storage/series/lookup/entry_whitebox_test.go
@@ -69,7 +69,7 @@ func TestEntryIndexSeriesRef(t *testing.T) {
mockIndexWriter.EXPECT().BlockStartForWriteTime(blockStart.ToTime()).Return(blockStart)
mockSeries := series.NewMockDatabaseSeries(ctrl)
- mockSeries.EXPECT().Metadata().Return(doc.Document{})
+ mockSeries.EXPECT().Metadata().Return(doc.Metadata{})
mockSeries.EXPECT().Write(
context.NewContext(),
blockStart.ToTime(),
@@ -94,7 +94,7 @@ func TestEntryIndexSeriesRef(t *testing.T) {
OnIndexSeries: e,
EnqueuedAt: now,
},
- Document: doc.Document{},
+ Document: doc.Metadata{},
},
}).Return(nil)
diff --git a/src/dbnode/storage/series/series.go b/src/dbnode/storage/series/series.go
index 63d079de1c..d856dd4688 100644
--- a/src/dbnode/storage/series/series.go
+++ b/src/dbnode/storage/series/series.go
@@ -63,11 +63,11 @@ type dbSeries struct {
// pooling the ID rather than releasing it to the GC on
// calling series.Reset()).
// Note: The bytes that back "id ident.ID" are the same bytes
- // that are behind the ID in "metadata doc.Document", the whole
+ // that are behind the ID in "metadata doc.Metadata", the whole
// reason we keep an ident.ID on the series is since there's a lot
// of existing callsites that require the ID as an ident.ID.
id ident.ID
- metadata doc.Document
+ metadata doc.Metadata
uniqueIndex uint64
bootstrap dbSeriesBootstrap
@@ -124,7 +124,7 @@ func (s *dbSeries) ID() ident.ID {
return id
}
-func (s *dbSeries) Metadata() doc.Document {
+func (s *dbSeries) Metadata() doc.Metadata {
s.RLock()
metadata := s.metadata
s.RUnlock()
@@ -689,7 +689,7 @@ func (s *dbSeries) Close() {
// See Reset() for why these aren't finalized.
s.id = nil
- s.metadata = doc.Document{}
+ s.metadata = doc.Metadata{}
s.uniqueIndex = 0
switch s.opts.CachePolicy() {
diff --git a/src/dbnode/storage/series/series_mock.go b/src/dbnode/storage/series/series_mock.go
index cf67c09355..d0a70bedef 100644
--- a/src/dbnode/storage/series/series_mock.go
+++ b/src/dbnode/storage/series/series_mock.go
@@ -222,10 +222,10 @@ func (mr *MockDatabaseSeriesMockRecorder) LoadBlock(arg0, arg1 interface{}) *gom
}
// Metadata mocks base method
-func (m *MockDatabaseSeries) Metadata() doc.Document {
+func (m *MockDatabaseSeries) Metadata() doc.Metadata {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Metadata")
- ret0, _ := ret[0].(doc.Document)
+ ret0, _ := ret[0].(doc.Metadata)
return ret0
}
diff --git a/src/dbnode/storage/series/types.go b/src/dbnode/storage/series/types.go
index 9f81928d1e..4e6ca8423a 100644
--- a/src/dbnode/storage/series/types.go
+++ b/src/dbnode/storage/series/types.go
@@ -44,7 +44,7 @@ import (
// DatabaseSeriesOptions is a set of options for creating a database series.
type DatabaseSeriesOptions struct {
ID ident.ID
- Metadata doc.Document
+ Metadata doc.Metadata
UniqueIndex uint64
BlockRetriever QueryableBlockRetriever
OnRetrieveBlock block.OnRetrieveBlock
@@ -61,7 +61,7 @@ type DatabaseSeries interface {
ID() ident.ID
// Metadata returns the metadata of the series.
- Metadata() doc.Document
+ Metadata() doc.Metadata
// UniqueIndex is the unique index for the series (for this current
// process, unless the time series expires).
diff --git a/src/dbnode/storage/shard.go b/src/dbnode/storage/shard.go
index 5b1d8e0034..9a5c4398c0 100644
--- a/src/dbnode/storage/shard.go
+++ b/src/dbnode/storage/shard.go
@@ -84,7 +84,7 @@ var (
// is hit or exceeded.
ErrDatabaseLoadLimitHit = errors.New("error loading series, database load limit hit")
- emptyDoc = doc.Document{}
+ emptyDoc = doc.Metadata{}
)
type filesetsFn func(
@@ -1232,7 +1232,7 @@ func (s *dbShard) newShardEntry(
// Hence this stays on the storage/series.DatabaseSeries for when it needs
// to be re-indexed.
var (
- seriesMetadata doc.Document
+ seriesMetadata doc.Metadata
err error
)
switch tagsArgOpts.arg {
@@ -2790,7 +2790,7 @@ func (s *dbShard) BootstrapState() BootstrapState {
return bs
}
-func (s *dbShard) DocRef(id ident.ID) (doc.Document, bool, error) {
+func (s *dbShard) DocRef(id ident.ID) (doc.Metadata, bool, error) {
s.RLock()
defer s.RUnlock()
diff --git a/src/dbnode/storage/shard_index_test.go b/src/dbnode/storage/shard_index_test.go
index 9f5087ebb5..1416c0ff34 100644
--- a/src/dbnode/storage/shard_index_test.go
+++ b/src/dbnode/storage/shard_index_test.go
@@ -46,7 +46,7 @@ func TestShardInsertNamespaceIndex(t *testing.T) {
opts := DefaultTestOptions()
lock := sync.Mutex{}
- indexWrites := []doc.Document{}
+ indexWrites := []doc.Metadata{}
now := time.Now()
blockSize := namespace.NewIndexOptions().BlockSize()
diff --git a/src/dbnode/storage/shard_test.go b/src/dbnode/storage/shard_test.go
index 8c0a237556..5e2d9ec294 100644
--- a/src/dbnode/storage/shard_test.go
+++ b/src/dbnode/storage/shard_test.go
@@ -629,7 +629,7 @@ func TestShardColdFlush(t *testing.T) {
for _, ds := range dirtyData {
curr := series.NewMockDatabaseSeries(ctrl)
curr.EXPECT().ID().Return(ds.id).AnyTimes()
- curr.EXPECT().Metadata().Return(doc.Document{ID: ds.id.Bytes()}).AnyTimes()
+ curr.EXPECT().Metadata().Return(doc.Metadata{ID: ds.id.Bytes()}).AnyTimes()
curr.EXPECT().ColdFlushBlockStarts(gomock.Any()).
Return(optimizedTimesFromTimes(ds.dirtyTimes))
shard.list.PushBack(lookup.NewEntry(lookup.NewEntryOptions{
@@ -1173,7 +1173,7 @@ func testShardWriteAsync(t *testing.T, writes []testWrite) {
document, exists, err := shard.DocRef(ident.StringID("NOT_PRESENT_ID"))
require.NoError(t, err)
require.False(t, exists)
- require.Equal(t, doc.Document{}, document)
+ require.Equal(t, doc.Metadata{}, document)
}
// This tests a race in shard ticking with an empty series pending expiration.
diff --git a/src/dbnode/storage/storage_mock.go b/src/dbnode/storage/storage_mock.go
index 1cc099d797..ff27a2feeb 100644
--- a/src/dbnode/storage/storage_mock.go
+++ b/src/dbnode/storage/storage_mock.go
@@ -1180,10 +1180,10 @@ func (mr *MockNamespaceMockRecorder) SetReadOnly(value interface{}) *gomock.Call
}
// DocRef mocks base method
-func (m *MockNamespace) DocRef(id ident.ID) (doc.Document, bool, error) {
+func (m *MockNamespace) DocRef(id ident.ID) (doc.Metadata, bool, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DocRef", id)
- ret0, _ := ret[0].(doc.Document)
+ ret0, _ := ret[0].(doc.Metadata)
ret1, _ := ret[1].(bool)
ret2, _ := ret[2].(error)
return ret0, ret1, ret2
@@ -1401,10 +1401,10 @@ func (mr *MockdatabaseNamespaceMockRecorder) SetReadOnly(value interface{}) *gom
}
// DocRef mocks base method
-func (m *MockdatabaseNamespace) DocRef(id ident.ID) (doc.Document, bool, error) {
+func (m *MockdatabaseNamespace) DocRef(id ident.ID) (doc.Metadata, bool, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DocRef", id)
- ret0, _ := ret[0].(doc.Document)
+ ret0, _ := ret[0].(doc.Metadata)
ret1, _ := ret[1].(bool)
ret2, _ := ret[2].(error)
return ret0, ret1, ret2
@@ -2343,10 +2343,10 @@ func (mr *MockdatabaseShardMockRecorder) SeriesReadWriteRef(id, tags interface{}
}
// DocRef mocks base method
-func (m *MockdatabaseShard) DocRef(id ident.ID) (doc.Document, bool, error) {
+func (m *MockdatabaseShard) DocRef(id ident.ID) (doc.Metadata, bool, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "DocRef", id)
- ret0, _ := ret[0].(doc.Document)
+ ret0, _ := ret[0].(doc.Metadata)
ret1, _ := ret[1].(bool)
ret2, _ := ret[2].(error)
return ret0, ret1, ret2
@@ -3697,6 +3697,20 @@ func (mr *MockOnColdFlushNamespaceMockRecorder) OnFlushNewSeries(arg0 interface{
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OnFlushNewSeries", reflect.TypeOf((*MockOnColdFlushNamespace)(nil).OnFlushNewSeries), arg0)
}
+// CheckpointAndMaybeCompact mocks base method
+func (m *MockOnColdFlushNamespace) CheckpointAndMaybeCompact() error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "CheckpointAndMaybeCompact")
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// CheckpointAndMaybeCompact indicates an expected call of CheckpointAndMaybeCompact
+func (mr *MockOnColdFlushNamespaceMockRecorder) CheckpointAndMaybeCompact() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CheckpointAndMaybeCompact", reflect.TypeOf((*MockOnColdFlushNamespace)(nil).CheckpointAndMaybeCompact))
+}
+
// Done mocks base method
func (m *MockOnColdFlushNamespace) Done() error {
m.ctrl.T.Helper()
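
The newly generated mock methods follow the usual gomock pattern, so a test in this package can assert the new call like the sketch below; `NewMockOnColdFlushNamespace` is assumed to be the standard gomock constructor generated elsewhere in this file.

```go
func TestColdFlushCheckpointSketch(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	ns := NewMockOnColdFlushNamespace(ctrl)
	ns.EXPECT().CheckpointAndMaybeCompact().Return(nil)

	require.NoError(t, ns.CheckpointAndMaybeCompact())
}
```
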
diff --git a/src/dbnode/storage/types.go b/src/dbnode/storage/types.go
index d3aa8ef7a1..afdbde4492 100644
--- a/src/dbnode/storage/types.go
+++ b/src/dbnode/storage/types.go
@@ -299,7 +299,7 @@ type Namespace interface {
SetReadOnly(value bool)
// DocRef returns the doc if already present in a namespace shard.
- DocRef(id ident.ID) (doc.Document, bool, error)
+ DocRef(id ident.ID) (doc.Metadata, bool, error)
// WideQueryIDs resolves the given query into known IDs in a streaming
// fashion.
@@ -669,7 +669,7 @@ type databaseShard interface {
) (SeriesReadWriteRef, error)
// DocRef returns the doc if already present in a shard series.
- DocRef(id ident.ID) (doc.Document, bool, error)
+ DocRef(id ident.ID) (doc.Metadata, bool, error)
// AggregateTiles does large tile aggregation from source shards into this shard.
AggregateTiles(
diff --git a/src/dbnode/ts/writes/types.go b/src/dbnode/ts/writes/types.go
index bb8ac839f5..121a483511 100644
--- a/src/dbnode/ts/writes/types.go
+++ b/src/dbnode/ts/writes/types.go
@@ -49,7 +49,7 @@ type Write struct {
// PendingIndexInsert is a pending index insert.
type PendingIndexInsert struct {
Entry index.WriteBatchEntry
- Document doc.Document
+ Document doc.Metadata
}
// BatchWrite represents a write that was added to the
diff --git a/src/m3ninx/doc/doc_arraypool_gen.go b/src/m3ninx/doc/doc_arraypool_gen.go
index 224ad8caf0..c52d4f4305 100644
--- a/src/m3ninx/doc/doc_arraypool_gen.go
+++ b/src/m3ninx/doc/doc_arraypool_gen.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 Uber Technologies, Inc.
+// Copyright (c) 2021 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -48,20 +48,20 @@ import (
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
-// DocumentArrayPool provides a pool for document slices.
+// DocumentArrayPool provides a pool for metadata slices.
type DocumentArrayPool interface {
// Init initializes the array pool; it needs to be called
// before Get/Put are used.
Init()
// Get returns a slice from the pool.
- Get() []Document
+ Get() []Metadata
// Put returns the provided slice to the pool.
- Put(elems []Document)
+ Put(elems []Metadata)
}
-type DocumentFinalizeFn func([]Document) []Document
+type DocumentFinalizeFn func([]Metadata) []Metadata
type DocumentArrayPoolOpts struct {
Options pool.ObjectPoolOptions
@@ -85,15 +85,15 @@ func NewDocumentArrayPool(opts DocumentArrayPoolOpts) DocumentArrayPool {
func (p *DocumentArrPool) Init() {
p.pool.Init(func() interface{} {
- return make([]Document, 0, p.opts.Capacity)
+ return make([]Metadata, 0, p.opts.Capacity)
})
}
-func (p *DocumentArrPool) Get() []Document {
- return p.pool.Get().([]Document)
+func (p *DocumentArrPool) Get() []Metadata {
+ return p.pool.Get().([]Metadata)
}
-func (p *DocumentArrPool) Put(arr []Document) {
+func (p *DocumentArrPool) Put(arr []Metadata) {
arr = p.opts.FinalizeFn(arr)
if max := p.opts.MaxCapacity; max > 0 && cap(arr) > max {
return
@@ -101,8 +101,8 @@ func (p *DocumentArrPool) Put(arr []Document) {
p.pool.Put(arr)
}
-func defaultDocumentFinalizerFn(elems []Document) []Document {
- var empty Document
+func defaultDocumentFinalizerFn(elems []Metadata) []Metadata {
+ var empty Metadata
for i := range elems {
elems[i] = empty
}
@@ -110,16 +110,16 @@ func defaultDocumentFinalizerFn(elems []Document) []Document {
return elems
}
-type DocumentArr []Document
+type DocumentArr []Metadata
-func (elems DocumentArr) grow(n int) []Document {
+func (elems DocumentArr) grow(n int) []Metadata {
if cap(elems) < n {
- elems = make([]Document, n)
+ elems = make([]Metadata, n)
}
elems = elems[:n]
// following compiler optimized memclr impl
// https://github.com/golang/go/wiki/CompilerOptimizations#optimized-memclr
- var empty Document
+ var empty Metadata
for i := range elems {
elems[i] = empty
}
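
The regenerated pool is used the same way as before; only the element type changes. A sketch, assuming the `DocumentArrayPoolOpts` fields referenced above (`Capacity`, `MaxCapacity`, `FinalizeFn`) and m3's `pool.NewObjectPoolOptions` default constructor:

```go
func documentArrayPoolSketch() {
	arrPool := doc.NewDocumentArrayPool(doc.DocumentArrayPoolOpts{
		Options:     pool.NewObjectPoolOptions(), // assumed default constructor from src/x/pool
		Capacity:    256,                         // cap of a freshly allocated []doc.Metadata
		MaxCapacity: 4096,                        // slices grown past this are dropped on Put
	})
	arrPool.Init()

	arr := arrPool.Get() // len 0, cap 256
	arr = append(arr, doc.Metadata{ID: []byte("series-1")})
	arrPool.Put(arr) // the finalizer zeroes elements before the slice is reused
}
```
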
diff --git a/src/m3ninx/doc/doc_mock.go b/src/m3ninx/doc/doc_mock.go
index 0cec3d8619..25e54975a1 100644
--- a/src/m3ninx/doc/doc_mock.go
+++ b/src/m3ninx/doc/doc_mock.go
@@ -68,10 +68,10 @@ func (mr *MockIteratorMockRecorder) Next() *gomock.Call {
}
// Current mocks base method
-func (m *MockIterator) Current() Document {
+func (m *MockIterator) Current() Metadata {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Current")
- ret0, _ := ret[0].(Document)
+ ret0, _ := ret[0].(Metadata)
return ret0
}
diff --git a/src/m3ninx/doc/document.go b/src/m3ninx/doc/document.go
index 164c0f210e..bb92d6525d 100644
--- a/src/m3ninx/doc/document.go
+++ b/src/m3ninx/doc/document.go
@@ -87,15 +87,15 @@ func (f Fields) shallowCopy() Fields {
return cp
}
-// Document represents a document to be indexed.
-type Document struct {
+// Metadata represents a document to be indexed.
+type Metadata struct {
ID []byte
Fields []Field
}
// Get returns the value of the specified field name in the document if it exists.
-func (d Document) Get(fieldName []byte) ([]byte, bool) {
- for _, f := range d.Fields {
+func (m Metadata) Get(fieldName []byte) ([]byte, bool) {
+ for _, f := range m.Fields { // nolint:gocritic
if bytes.Equal(fieldName, f.Name) {
return f.Value, true
}
@@ -105,12 +105,12 @@ func (d Document) Get(fieldName []byte) ([]byte, bool) {
-// Compare returns an integer comparing two documents. The result will be 0 if the documents
-// are equal, -1 if d is ordered before other, and 1 if d is ordered aftered other.
-func (d Document) Compare(other Document) int {
- if c := bytes.Compare(d.ID, other.ID); c != 0 {
+// Compare returns an integer comparing two documents. The result will be 0 if the documents
+// are equal, -1 if m is ordered before other, and 1 if m is ordered after other.
+func (m Metadata) Compare(other Metadata) int {
+ if c := bytes.Compare(m.ID, other.ID); c != 0 {
return c
}
- l, r := Fields(d.Fields), Fields(other.Fields)
+ l, r := Fields(m.Fields), Fields(other.Fields)
// Make a shallow copy of the Fields so we don't mutate the document.
if !sort.IsSorted(l) {
@@ -146,21 +146,21 @@ func (d Document) Compare(other Document) int {
}
-// Equal returns a bool indicating whether d is equal to other.
-func (d Document) Equal(other Document) bool {
- return d.Compare(other) == 0
+// Equal returns a bool indicating whether m is equal to other.
+func (m Metadata) Equal(other Metadata) bool {
+ return m.Compare(other) == 0
}
-// Validate returns a bool indicating whether the document is valid.
-func (d Document) Validate() error {
- if len(d.Fields) == 0 && !d.HasID() {
+// Validate returns an error if the document is invalid.
+func (m Metadata) Validate() error {
+ if len(m.Fields) == 0 && !m.HasID() {
return ErrEmptyDocument
}
- if !utf8.Valid(d.ID) {
- return fmt.Errorf("document has invalid ID: id=%v, id_hex=%x", d.ID, d.ID)
+ if !utf8.Valid(m.ID) {
+ return fmt.Errorf("document has invalid ID: id=%v, id_hex=%x", m.ID, m.ID)
}
- for _, f := range d.Fields {
+ for _, f := range m.Fields { // nolint:gocritic
// TODO: Should we enforce uniqueness of field names?
if !utf8.Valid(f.Name) {
return fmt.Errorf("document has invalid field name: name=%v, name_hex=%x",
@@ -181,23 +181,23 @@ func (d Document) Validate() error {
}
// HasID returns a bool indicating whether the document has an ID or not.
-func (d Document) HasID() bool {
- return len(d.ID) > 0
+func (m Metadata) HasID() bool {
+ return len(m.ID) > 0
}
-func (d Document) String() string {
+func (m Metadata) String() string {
var buf bytes.Buffer
- for i, f := range d.Fields {
+ for i, f := range m.Fields { // nolint:gocritic
buf.WriteString(fmt.Sprintf("%s: %s", f.Name, f.Value))
- if i != len(d.Fields)-1 {
+ if i != len(m.Fields)-1 {
buf.WriteString(", ")
}
}
- return fmt.Sprintf("{id: %s, fields: {%s}}", d.ID, buf.String())
+ return fmt.Sprintf("{id: %s, fields: {%s}}", m.ID, buf.String())
}
// Documents is a list of documents.
-type Documents []Document
+type Documents []Metadata
func (ds Documents) Len() int {
return len(ds)
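
The renamed type keeps exactly the API exercised in this file. For reference, a small sketch of the methods touched in these hunks, written as an external caller of the `doc` package:

```go
func metadataSketch() {
	m := doc.Metadata{
		ID: []byte("831992"),
		Fields: []doc.Field{
			{Name: []byte("fruit"), Value: []byte("apple")},
		},
	}
	if err := m.Validate(); err != nil { // non-nil for empty docs or invalid UTF-8
		panic(err)
	}
	val, ok := m.Get([]byte("fruit")) // []byte("apple"), true
	same := m.Equal(m)                // true: Compare returns 0 for identical docs
	_, _, _ = val, ok, same
}
```
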
diff --git a/src/m3ninx/doc/document_matcher.go b/src/m3ninx/doc/document_matcher.go
index deba71d7dc..14fe8a72b7 100644
--- a/src/m3ninx/doc/document_matcher.go
+++ b/src/m3ninx/doc/document_matcher.go
@@ -33,12 +33,12 @@ type DocumentMatcher interface {
}
// NewDocumentMatcher returns a new DocumentMatcher.
-func NewDocumentMatcher(d Document) DocumentMatcher {
+func NewDocumentMatcher(d Metadata) DocumentMatcher {
return docMatcher{d}
}
type docMatcher struct {
- d Document
+ d Metadata
}
func (dm docMatcher) Matches(x interface{}) bool {
diff --git a/src/m3ninx/doc/document_matcher_test.go b/src/m3ninx/doc/document_matcher_test.go
index a928be7da4..682b0c6ec0 100644
--- a/src/m3ninx/doc/document_matcher_test.go
+++ b/src/m3ninx/doc/document_matcher_test.go
@@ -29,18 +29,18 @@ import (
func TestDocumentMatcher(t *testing.T) {
tests := []struct {
name string
- l, r Document
+ l, r Metadata
expected bool
}{
{
name: "empty documents are equal",
- l: Document{},
- r: Document{},
+ l: Metadata{},
+ r: Metadata{},
expected: true,
},
{
name: "documents with the same fields in the same order are equal",
- l: Document{
+ l: Metadata{
ID: []byte("831992"),
Fields: []Field{
Field{
@@ -53,7 +53,7 @@ func TestDocumentMatcher(t *testing.T) {
},
},
},
- r: Document{
+ r: Metadata{
ID: []byte("831992"),
Fields: []Field{
Field{
@@ -70,7 +70,7 @@ func TestDocumentMatcher(t *testing.T) {
},
{
name: "documents with the same fields in different order are unequal",
- l: Document{
+ l: Metadata{
ID: []byte("831992"),
Fields: []Field{
Field{
@@ -83,7 +83,7 @@ func TestDocumentMatcher(t *testing.T) {
},
},
},
- r: Document{
+ r: Metadata{
ID: []byte("831992"),
Fields: []Field{
Field{
@@ -100,7 +100,7 @@ func TestDocumentMatcher(t *testing.T) {
},
{
name: "documents with different fields are unequal",
- l: Document{
+ l: Metadata{
ID: []byte("831992"),
Fields: []Field{
Field{
@@ -113,7 +113,7 @@ func TestDocumentMatcher(t *testing.T) {
},
},
},
- r: Document{
+ r: Metadata{
ID: []byte("831992"),
Fields: []Field{
Field{
@@ -130,7 +130,7 @@ func TestDocumentMatcher(t *testing.T) {
},
{
name: "documents with different IDs are unequal",
- l: Document{
+ l: Metadata{
ID: []byte("831992"),
Fields: []Field{
Field{
@@ -139,7 +139,7 @@ func TestDocumentMatcher(t *testing.T) {
},
},
},
- r: Document{
+ r: Metadata{
ID: []byte("080292"),
Fields: []Field{
Field{
diff --git a/src/m3ninx/doc/document_test.go b/src/m3ninx/doc/document_test.go
index a13666637c..0629d6bcf4 100644
--- a/src/m3ninx/doc/document_test.go
+++ b/src/m3ninx/doc/document_test.go
@@ -98,14 +98,14 @@ func TestSortingFields(t *testing.T) {
func TestDocumentGetField(t *testing.T) {
tests := []struct {
name string
- input Document
+ input Metadata
fieldName []byte
expectedOk bool
expectedVal []byte
}{
{
name: "get existing field",
- input: Document{
+ input: Metadata{
Fields: []Field{
Field{
Name: []byte("apple"),
@@ -119,7 +119,7 @@ func TestDocumentGetField(t *testing.T) {
},
{
name: "get nonexisting field",
- input: Document{
+ input: Metadata{
Fields: []Field{
Field{
Name: []byte("apple"),
@@ -148,18 +148,18 @@ func TestDocumentGetField(t *testing.T) {
func TestDocumentCompare(t *testing.T) {
tests := []struct {
name string
- l, r Document
+ l, r Metadata
expected int
}{
{
name: "empty documents are equal",
- l: Document{},
- r: Document{},
+ l: Metadata{},
+ r: Metadata{},
expected: 0,
},
{
name: "documents with the same id and the same fields in the same order are equal",
- l: Document{
+ l: Metadata{
ID: []byte("831992"),
Fields: []Field{
Field{
@@ -172,7 +172,7 @@ func TestDocumentCompare(t *testing.T) {
},
},
},
- r: Document{
+ r: Metadata{
ID: []byte("831992"),
Fields: []Field{
Field{
@@ -189,7 +189,7 @@ func TestDocumentCompare(t *testing.T) {
},
{
name: "documents are ordered by their IDs",
- l: Document{
+ l: Metadata{
ID: []byte("831992"),
Fields: []Field{
Field{
@@ -198,7 +198,7 @@ func TestDocumentCompare(t *testing.T) {
},
},
},
- r: Document{
+ r: Metadata{
ID: []byte("831991"),
Fields: []Field{
Field{
@@ -211,7 +211,7 @@ func TestDocumentCompare(t *testing.T) {
},
{
name: "documents are ordered by their field names",
- l: Document{
+ l: Metadata{
ID: []byte("831992"),
Fields: []Field{
Field{
@@ -220,7 +220,7 @@ func TestDocumentCompare(t *testing.T) {
},
},
},
- r: Document{
+ r: Metadata{
ID: []byte("831992"),
Fields: []Field{
Field{
@@ -233,7 +233,7 @@ func TestDocumentCompare(t *testing.T) {
},
{
name: "documents are ordered by their field values",
- l: Document{
+ l: Metadata{
ID: []byte("831992"),
Fields: []Field{
Field{
@@ -242,7 +242,7 @@ func TestDocumentCompare(t *testing.T) {
},
},
},
- r: Document{
+ r: Metadata{
ID: []byte("831992"),
Fields: []Field{
Field{
@@ -255,7 +255,7 @@ func TestDocumentCompare(t *testing.T) {
},
{
name: "documents are ordered by their lengths",
- l: Document{
+ l: Metadata{
ID: []byte("831992"),
Fields: []Field{
Field{
@@ -264,7 +264,7 @@ func TestDocumentCompare(t *testing.T) {
},
},
},
- r: Document{
+ r: Metadata{
ID: []byte("831992"),
Fields: []Field{
Field{
@@ -290,18 +290,18 @@ func TestDocumentCompare(t *testing.T) {
func TestDocumentEquality(t *testing.T) {
tests := []struct {
name string
- l, r Document
+ l, r Metadata
expected bool
}{
{
name: "empty documents are equal",
- l: Document{},
- r: Document{},
+ l: Metadata{},
+ r: Metadata{},
expected: true,
},
{
name: "documents with the same fields in the same order are equal",
- l: Document{
+ l: Metadata{
ID: []byte("831992"),
Fields: []Field{
Field{
@@ -314,7 +314,7 @@ func TestDocumentEquality(t *testing.T) {
},
},
},
- r: Document{
+ r: Metadata{
ID: []byte("831992"),
Fields: []Field{
Field{
@@ -331,7 +331,7 @@ func TestDocumentEquality(t *testing.T) {
},
{
name: "documents with the same fields in different order are equal",
- l: Document{
+ l: Metadata{
ID: []byte("831992"),
Fields: []Field{
Field{
@@ -344,7 +344,7 @@ func TestDocumentEquality(t *testing.T) {
},
},
},
- r: Document{
+ r: Metadata{
ID: []byte("831992"),
Fields: []Field{
Field{
@@ -361,7 +361,7 @@ func TestDocumentEquality(t *testing.T) {
},
{
name: "documents with different fields are unequal",
- l: Document{
+ l: Metadata{
ID: []byte("831992"),
Fields: []Field{
Field{
@@ -374,7 +374,7 @@ func TestDocumentEquality(t *testing.T) {
},
},
},
- r: Document{
+ r: Metadata{
ID: []byte("831992"),
Fields: []Field{
Field{
@@ -391,7 +391,7 @@ func TestDocumentEquality(t *testing.T) {
},
{
name: "documents with different IDs are unequal",
- l: Document{
+ l: Metadata{
ID: []byte("831992"),
Fields: []Field{
Field{
@@ -400,7 +400,7 @@ func TestDocumentEquality(t *testing.T) {
},
},
},
- r: Document{
+ r: Metadata{
ID: []byte("080292"),
Fields: []Field{
Field{
@@ -423,24 +423,24 @@ func TestDocumentEquality(t *testing.T) {
func TestDocumentValidation(t *testing.T) {
tests := []struct {
name string
- input Document
+ input Metadata
expectedErr bool
}{
{
name: "empty document",
- input: Document{},
+ input: Metadata{},
expectedErr: true,
},
{
name: "empty document w/ ID",
- input: Document{
+ input: Metadata{
ID: []byte("foobar"),
},
expectedErr: false,
},
{
name: "invalid UTF-8 in field name",
- input: Document{
+ input: Metadata{
Fields: []Field{
Field{
Name: []byte("\xff"),
@@ -452,7 +452,7 @@ func TestDocumentValidation(t *testing.T) {
},
{
name: "invalid UTF-8 in field value",
- input: Document{
+ input: Metadata{
Fields: []Field{
Field{
Name: []byte("\xff"),
@@ -464,7 +464,7 @@ func TestDocumentValidation(t *testing.T) {
},
{
name: "document contains field with reserved field name",
- input: Document{
+ input: Metadata{
Fields: []Field{
Field{
Name: []byte("apple"),
@@ -480,7 +480,7 @@ func TestDocumentValidation(t *testing.T) {
},
{
name: "valid document",
- input: Document{
+ input: Metadata{
Fields: []Field{
Field{
Name: []byte("apple"),
@@ -507,26 +507,26 @@ func TestDocumentValidation(t *testing.T) {
func TestDocumentHasID(t *testing.T) {
tests := []struct {
name string
- input Document
+ input Metadata
expected bool
}{
{
name: "nil ID",
- input: Document{
+ input: Metadata{
ID: nil,
},
expected: false,
},
{
name: "zero-length ID",
- input: Document{
+ input: Metadata{
ID: make([]byte, 0, 16),
},
expected: false,
},
{
name: "valid ID",
- input: Document{
+ input: Metadata{
ID: []byte("831992"),
},
expected: true,
@@ -548,7 +548,7 @@ func TestSortingDocuments(t *testing.T) {
{
name: "unordered documents",
input: Documents{
- Document{
+ Metadata{
ID: []byte("831992"),
Fields: []Field{
Field{
@@ -557,7 +557,7 @@ func TestSortingDocuments(t *testing.T) {
},
},
},
- Document{
+ Metadata{
ID: []byte("831992"),
Fields: []Field{
Field{
@@ -568,7 +568,7 @@ func TestSortingDocuments(t *testing.T) {
},
},
expected: Documents{
- Document{
+ Metadata{
ID: []byte("831992"),
Fields: []Field{
Field{
@@ -577,7 +577,7 @@ func TestSortingDocuments(t *testing.T) {
},
},
},
- Document{
+ Metadata{
ID: []byte("831992"),
Fields: []Field{
Field{
diff --git a/src/m3ninx/doc/types.go b/src/m3ninx/doc/types.go
index 76fc04e3ac..5593b07f24 100644
--- a/src/m3ninx/doc/types.go
+++ b/src/m3ninx/doc/types.go
@@ -28,10 +28,10 @@ type Iterator interface {
Next() bool
// Current returns the current document. It is only safe to call Current immediately
- // after a call to Next confirms there are more elements remaining. The Document
+ // after a call to Next confirms there are more elements remaining. The Metadata
// returned from Current is only valid until the following call to Next(). Callers
- // should copy the Document if they need it live longer.
- Current() Document
+ // should copy the Metadata if they need it to live longer.
+ Current() Metadata
// Err returns any errors encountered during iteration.
Err() error
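
The contract above (a Metadata is only valid until the next call to Next) is why consumers buffer values as they iterate; the `collectDocs` test helper further down in this diff does exactly that. A sketch, with the caveat that appending copies the struct but its byte slices still alias the iterator's backing data:

```go
func collect(iter doc.Iterator) ([]doc.Metadata, error) {
	var out []doc.Metadata
	for iter.Next() {
		// Struct copy only; deep-copy ID/Fields bytes if they
		// must outlive the iterator itself.
		out = append(out, iter.Current())
	}
	return out, iter.Err()
}
```
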
diff --git a/src/m3ninx/generated-source-files.mk b/src/m3ninx/generated-source-files.mk
index 980fa83c3b..4b85df122b 100644
--- a/src/m3ninx/generated-source-files.mk
+++ b/src/m3ninx/generated-source-files.mk
@@ -119,7 +119,7 @@ genny-arraypool-bytes-slice-array-pool:
genny-arraypool-document-array-pool:
cd $(m3x_package_path) && make genny-arraypool \
pkg=doc \
- elem_type=Document \
+ elem_type=Metadata \
target_package=$(m3ninx_package)/doc \
out_file=doc_arraypool_gen.go \
rename_type_prefix=Document \
diff --git a/src/m3ninx/idx/types.go b/src/m3ninx/idx/types.go
index 4c35dd7f90..11fe2609ad 100644
--- a/src/m3ninx/idx/types.go
+++ b/src/m3ninx/idx/types.go
@@ -27,7 +27,7 @@ import (
// Index is an inverted index.
type Index interface {
// Insert inserts a document into the index.
- Insert(d doc.Document) error
+ Insert(d doc.Metadata) error
// Searcher returns a Searcher over a point-in-time view of the index.
Searcher() (Searcher, error)
diff --git a/src/m3ninx/index/batch.go b/src/m3ninx/index/batch.go
index 93162037aa..8d33d93d35 100644
--- a/src/m3ninx/index/batch.go
+++ b/src/m3ninx/index/batch.go
@@ -36,7 +36,7 @@ var (
// Batch represents a batch of documents that should be inserted into the index.
type Batch struct {
- Docs []doc.Document
+ Docs []doc.Metadata
// If AllowPartialUpdates is true the index will continue to index documents in the batch
// even if it encounters an error attempting to index a previous document in the batch.
@@ -67,7 +67,7 @@ func AllowPartialUpdates() BatchOption {
}
// NewBatch returns a Batch of documents.
-func NewBatch(docs []doc.Document, opts ...BatchOption) Batch {
+func NewBatch(docs []doc.Metadata, opts ...BatchOption) Batch {
b := Batch{Docs: docs}
for _, opt := range opts {
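
Call sites construct batches exactly as before, just with the renamed element type. For instance (sketch; IDs illustrative):

```go
func batchSketch() index.Batch {
	return index.NewBatch(
		[]doc.Metadata{
			{ID: []byte("a")},
			{ID: []byte("b")},
		},
		index.AllowPartialUpdates(), // keep indexing later docs if an earlier one fails
	)
}
```
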
diff --git a/src/m3ninx/index/batch_matcher_test.go b/src/m3ninx/index/batch_matcher_test.go
index 8ce40b7c11..047b2f532d 100644
--- a/src/m3ninx/index/batch_matcher_test.go
+++ b/src/m3ninx/index/batch_matcher_test.go
@@ -42,8 +42,8 @@ func TestBatchMatcherAllowPartialReflexive(t *testing.T) {
func TestBatchMatcherLengthReflexive(t *testing.T) {
a := index.Batch{
- Docs: []doc.Document{
- doc.Document{},
+ Docs: []doc.Metadata{
+ {},
},
}
b := index.Batch{}
@@ -52,136 +52,136 @@ func TestBatchMatcherLengthReflexive(t *testing.T) {
}
func TestBatchMatcherSameDoc(t *testing.T) {
- testDoc := doc.Document{
+ testDoc := doc.Metadata{
ID: []byte("abc"),
Fields: []doc.Field{
- doc.Field{
+ {
Name: []byte("foo"),
Value: []byte("bar"),
},
},
}
a := index.Batch{
- Docs: []doc.Document{testDoc, testDoc},
+ Docs: []doc.Metadata{testDoc, testDoc},
}
b := index.Batch{
- Docs: []doc.Document{testDoc, testDoc},
+ Docs: []doc.Metadata{testDoc, testDoc},
}
require.True(t, index.NewBatchMatcher(a).Matches(b))
require.True(t, index.NewBatchMatcher(b).Matches(a))
}
func TestBatchMatcherOrderMatters(t *testing.T) {
- testDocA := doc.Document{
+ testDocA := doc.Metadata{
ID: []byte("abc"),
Fields: []doc.Field{
- doc.Field{
+ {
Name: []byte("foo"),
Value: []byte("bar"),
},
},
}
- testDocB := doc.Document{
+ testDocB := doc.Metadata{
ID: []byte("abc"),
Fields: []doc.Field{
- doc.Field{
+ {
Name: []byte("bar"),
Value: []byte("foo"),
},
},
}
a := index.Batch{
- Docs: []doc.Document{testDocA, testDocB},
+ Docs: []doc.Metadata{testDocA, testDocB},
}
b := index.Batch{
- Docs: []doc.Document{testDocB, testDocA},
+ Docs: []doc.Metadata{testDocB, testDocA},
}
require.False(t, index.NewBatchMatcher(a).Matches(b))
require.False(t, index.NewBatchMatcher(b).Matches(a))
}
func TestBatchMatcherSameDocs(t *testing.T) {
- testDocA := doc.Document{
+ testDocA := doc.Metadata{
ID: []byte("abc"),
Fields: []doc.Field{
- doc.Field{
+ {
Name: []byte("foo"),
Value: []byte("bar"),
},
},
}
- testDocB := doc.Document{
+ testDocB := doc.Metadata{
ID: []byte("abc"),
Fields: []doc.Field{
- doc.Field{
+ {
Name: []byte("bar"),
Value: []byte("foo"),
},
},
}
a := index.Batch{
- Docs: []doc.Document{testDocA, testDocB},
+ Docs: []doc.Metadata{testDocA, testDocB},
}
b := index.Batch{
- Docs: []doc.Document{testDocA, testDocB},
+ Docs: []doc.Metadata{testDocA, testDocB},
}
require.True(t, index.NewBatchMatcher(a).Matches(b))
require.True(t, index.NewBatchMatcher(b).Matches(a))
}
func TestBatchMatcherDocFieldsDiffer(t *testing.T) {
- testDocA := doc.Document{
+ testDocA := doc.Metadata{
ID: []byte("abc"),
Fields: []doc.Field{
- doc.Field{
+ {
Name: []byte("foo"),
Value: []byte("bar"),
},
},
}
- testDocB := doc.Document{
+ testDocB := doc.Metadata{
ID: []byte("abc"),
Fields: []doc.Field{
- doc.Field{
+ {
Name: []byte("foo"),
Value: []byte("bar1"),
},
},
}
a := index.Batch{
- Docs: []doc.Document{testDocA},
+ Docs: []doc.Metadata{testDocA},
}
b := index.Batch{
- Docs: []doc.Document{testDocB},
+ Docs: []doc.Metadata{testDocB},
}
require.False(t, index.NewBatchMatcher(a).Matches(b))
require.False(t, index.NewBatchMatcher(b).Matches(a))
}
func TestBatchMatcherDocIDsDiffer(t *testing.T) {
- testDocA := doc.Document{
+ testDocA := doc.Metadata{
ID: []byte("abc1"),
Fields: []doc.Field{
- doc.Field{
+ {
Name: []byte("foo"),
Value: []byte("bar"),
},
},
}
- testDocB := doc.Document{
+ testDocB := doc.Metadata{
ID: []byte("abc2"),
Fields: []doc.Field{
- doc.Field{
+ {
Name: []byte("foo"),
Value: []byte("bar"),
},
},
}
a := index.Batch{
- Docs: []doc.Document{testDocA},
+ Docs: []doc.Metadata{testDocA},
}
b := index.Batch{
- Docs: []doc.Document{testDocB},
+ Docs: []doc.Metadata{testDocB},
}
require.False(t, index.NewBatchMatcher(a).Matches(b))
require.False(t, index.NewBatchMatcher(b).Matches(a))
diff --git a/src/m3ninx/index/index_mock.go b/src/m3ninx/index/index_mock.go
index 26320c2311..4888262daf 100644
--- a/src/m3ninx/index/index_mock.go
+++ b/src/m3ninx/index/index_mock.go
@@ -86,10 +86,10 @@ func (mr *MockReaderMockRecorder) Close() *gomock.Call {
}
// Doc mocks base method
-func (m *MockReader) Doc(arg0 postings.ID) (doc.Document, error) {
+func (m *MockReader) Doc(arg0 postings.ID) (doc.Metadata, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Doc", arg0)
- ret0, _ := ret[0].(doc.Document)
+ ret0, _ := ret[0].(doc.Metadata)
ret1, _ := ret[1].(error)
return ret0, ret1
}
@@ -199,10 +199,10 @@ func (m *MockDocRetriever) EXPECT() *MockDocRetrieverMockRecorder {
}
// Doc mocks base method
-func (m *MockDocRetriever) Doc(arg0 postings.ID) (doc.Document, error) {
+func (m *MockDocRetriever) Doc(arg0 postings.ID) (doc.Metadata, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Doc", arg0)
- ret0, _ := ret[0].(doc.Document)
+ ret0, _ := ret[0].(doc.Metadata)
ret1, _ := ret[1].(error)
return ret0, ret1
}
diff --git a/src/m3ninx/index/iterator.go b/src/m3ninx/index/iterator.go
index 343226949e..464e430517 100644
--- a/src/m3ninx/index/iterator.go
+++ b/src/m3ninx/index/iterator.go
@@ -35,7 +35,7 @@ type idDocIterator struct {
retriever DocRetriever
postingsIter postings.Iterator
- currDoc doc.Document
+ currDoc doc.Metadata
currID postings.ID
closed bool
err error
@@ -65,7 +65,7 @@ func (it *idDocIterator) Next() bool {
return true
}
-func (it *idDocIterator) Current() doc.Document {
+func (it *idDocIterator) Current() doc.Metadata {
return it.currDoc
}
@@ -82,7 +82,7 @@ func (it *idDocIterator) Close() error {
return errIteratorClosed
}
it.closed = true
- it.currDoc = doc.Document{}
+ it.currDoc = doc.Metadata{}
it.currID = postings.ID(0)
err := it.postingsIter.Close()
return err
diff --git a/src/m3ninx/index/iterator_test.go b/src/m3ninx/index/iterator_test.go
index d702eebde1..d0da569c2f 100644
--- a/src/m3ninx/index/iterator_test.go
+++ b/src/m3ninx/index/iterator_test.go
@@ -36,11 +36,11 @@ func TestIterator(t *testing.T) {
docWithIds := []struct {
id postings.ID
- doc doc.Document
+ doc doc.Metadata
}{
{
id: 42,
- doc: doc.Document{
+ doc: doc.Metadata{
Fields: []doc.Field{
doc.Field{
Name: []byte("apple"),
@@ -51,7 +51,7 @@ func TestIterator(t *testing.T) {
},
{
id: 53,
- doc: doc.Document{
+ doc: doc.Metadata{
Fields: []doc.Field{
doc.Field{
Name: []byte("banana"),
@@ -62,7 +62,7 @@ func TestIterator(t *testing.T) {
},
{
id: 81,
- doc: doc.Document{
+ doc: doc.Metadata{
Fields: []doc.Field{
doc.Field{
Name: []byte("carrot"),
diff --git a/src/m3ninx/index/segment/builder/builder.go b/src/m3ninx/index/segment/builder/builder.go
index ac1cd597e5..4e2576b6f0 100644
--- a/src/m3ninx/index/segment/builder/builder.go
+++ b/src/m3ninx/index/segment/builder/builder.go
@@ -178,7 +178,7 @@ type builder struct {
newUUIDFn util.NewUUIDFn
batchSizeOne index.Batch
- docs []doc.Document
+ docs []doc.Metadata
idSet *IDsMap
shardedJobs []indexJob
shardedFields *shardedFields
@@ -200,7 +200,7 @@ func NewBuilderFromDocuments(opts Options) (segment.CloseableDocumentsBuilder, e
opts: opts,
newUUIDFn: opts.NewUUIDFn(),
batchSizeOne: index.Batch{
- Docs: make([]doc.Document, 1),
+ Docs: make([]doc.Metadata, 1),
},
idSet: NewIDsMap(IDsMapOptions{
InitialSize: opts.InitialCapacity(),
@@ -289,7 +289,7 @@ func (b *builder) Reset() {
defer b.status.Unlock()
// Reset the documents slice.
- var empty doc.Document
+ var empty doc.Metadata
for i := range b.docs {
b.docs[i] = empty
}
@@ -311,7 +311,7 @@ func (b *builder) Reset() {
}
}
-func (b *builder) Insert(d doc.Document) ([]byte, error) {
+func (b *builder) Insert(d doc.Metadata) ([]byte, error) {
b.status.Lock()
defer b.status.Unlock()
@@ -485,19 +485,19 @@ func (b *builder) AllDocs() (index.IDDocIterator, error) {
return index.NewIDDocIterator(b, rangeIter), nil
}
-func (b *builder) Doc(id postings.ID) (doc.Document, error) {
+func (b *builder) Doc(id postings.ID) (doc.Metadata, error) {
b.status.RLock()
defer b.status.RUnlock()
idx := int(id)
if idx < 0 || idx >= len(b.docs) {
- return doc.Document{}, errDocNotFound
+ return doc.Metadata{}, errDocNotFound
}
return b.docs[idx], nil
}
-func (b *builder) Docs() []doc.Document {
+func (b *builder) Docs() []doc.Metadata {
b.status.RLock()
defer b.status.RUnlock()
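
A sketch of the builder's renamed entry points, with error handling reduced to panics; `NewOptions` is the constructor used by the tests below, and `Close` is assumed from the `CloseableDocumentsBuilder` interface name.

```go
func builderSketch() {
	b, err := builder.NewBuilderFromDocuments(builder.NewOptions())
	if err != nil {
		panic(err)
	}
	defer b.Close()

	id, err := b.Insert(doc.Metadata{
		Fields: []doc.Field{{Name: []byte("fruit"), Value: []byte("pineapple")}},
	})
	if err != nil {
		panic(err)
	}
	_ = id // bytes of the stored document ID (assumed: generated via newUUIDFn when absent)
}
```
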
diff --git a/src/m3ninx/index/segment/builder/builder_test.go b/src/m3ninx/index/segment/builder/builder_test.go
index 2a62ae45c1..20e89dea00 100644
--- a/src/m3ninx/index/segment/builder/builder_test.go
+++ b/src/m3ninx/index/segment/builder/builder_test.go
@@ -36,39 +36,39 @@ import (
var (
testOptions = NewOptions()
- testDocuments = []doc.Document{
- doc.Document{
+ testDocuments = []doc.Metadata{
+ {
Fields: []doc.Field{
- doc.Field{
+ {
Name: []byte("fruit"),
Value: []byte("banana"),
},
- doc.Field{
+ {
Name: []byte("color"),
Value: []byte("yellow"),
},
},
},
- doc.Document{
+ {
Fields: []doc.Field{
- doc.Field{
+ {
Name: []byte("fruit"),
Value: []byte("apple"),
},
- doc.Field{
+ {
Name: []byte("color"),
Value: []byte("red"),
},
},
},
- doc.Document{
+ {
ID: []byte("42"),
Fields: []doc.Field{
- doc.Field{
+ {
Name: []byte("fruit"),
Value: []byte("pineapple"),
},
- doc.Field{
+ {
Name: []byte("color"),
Value: []byte("yellow"),
},
diff --git a/src/m3ninx/index/segment/builder/fields_map_new.go b/src/m3ninx/index/segment/builder/fields_map_new.go
index 015a1e21eb..86980955f6 100644
--- a/src/m3ninx/index/segment/builder/fields_map_new.go
+++ b/src/m3ninx/index/segment/builder/fields_map_new.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2020 Uber Technologies, Inc.
+// Copyright (c) 2021 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
diff --git a/src/m3ninx/index/segment/builder/ids_map_new.go b/src/m3ninx/index/segment/builder/ids_map_new.go
index 757181e444..cea45969b8 100644
--- a/src/m3ninx/index/segment/builder/ids_map_new.go
+++ b/src/m3ninx/index/segment/builder/ids_map_new.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2020 Uber Technologies, Inc.
+// Copyright (c) 2021 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
diff --git a/src/m3ninx/index/segment/builder/multi_segments_builder.go b/src/m3ninx/index/segment/builder/multi_segments_builder.go
index ef890fd30a..4c156e5127 100644
--- a/src/m3ninx/index/segment/builder/multi_segments_builder.go
+++ b/src/m3ninx/index/segment/builder/multi_segments_builder.go
@@ -32,7 +32,7 @@ import (
)
type builderFromSegments struct {
- docs []doc.Document
+ docs []doc.Metadata
idSet *IDsMap
segments []segmentMetadata
termsIter *termsIterFromSegments
@@ -61,7 +61,7 @@ func NewBuilderFromSegments(opts Options) segment.SegmentsBuilder {
func (b *builderFromSegments) Reset() {
// Reset the documents slice
- var emptyDoc doc.Document
+ var emptyDoc doc.Metadata
for i := range b.docs {
b.docs[i] = emptyDoc
}
@@ -92,7 +92,7 @@ func (b *builderFromSegments) AddSegments(segments []segment.Segment) error {
// Ensure we don't have to constantly reallocate docs slice
totalMaxSize := len(b.docs) + numMaxDocs
if cap(b.docs) < totalMaxSize {
- b.docs = make([]doc.Document, 0, totalMaxSize)
+ b.docs = make([]doc.Metadata, 0, totalMaxSize)
}
// First build metadata and docs slice
@@ -150,7 +150,7 @@ func (b *builderFromSegments) AddSegments(segments []segment.Segment) error {
return nil
}
-func (b *builderFromSegments) Docs() []doc.Document {
+func (b *builderFromSegments) Docs() []doc.Metadata {
return b.docs
}
@@ -159,10 +159,10 @@ func (b *builderFromSegments) AllDocs() (index.IDDocIterator, error) {
return index.NewIDDocIterator(b, rangeIter), nil
}
-func (b *builderFromSegments) Doc(id postings.ID) (doc.Document, error) {
+func (b *builderFromSegments) Doc(id postings.ID) (doc.Metadata, error) {
idx := int(id)
if idx < 0 || idx >= len(b.docs) {
- return doc.Document{}, errDocNotFound
+ return doc.Metadata{}, errDocNotFound
}
return b.docs[idx], nil
diff --git a/src/m3ninx/index/segment/builder/multi_segments_field_iter_test.go b/src/m3ninx/index/segment/builder/multi_segments_field_iter_test.go
index f7fc85d2a2..5938626ce6 100644
--- a/src/m3ninx/index/segment/builder/multi_segments_field_iter_test.go
+++ b/src/m3ninx/index/segment/builder/multi_segments_field_iter_test.go
@@ -38,7 +38,7 @@ var (
func TestFieldIterFromSegmentsDeduplicates(t *testing.T) {
segments := []segmentMetadata{
- {segment: newTestSegmentWithDocs(t, []doc.Document{
+ {segment: newTestSegmentWithDocs(t, []doc.Metadata{
{
ID: []byte("foo"),
Fields: []doc.Field{
@@ -47,7 +47,7 @@ func TestFieldIterFromSegmentsDeduplicates(t *testing.T) {
},
},
})},
- {segment: newTestSegmentWithDocs(t, []doc.Document{
+ {segment: newTestSegmentWithDocs(t, []doc.Metadata{
{
ID: []byte("bar"),
Fields: []doc.Field{
@@ -80,7 +80,7 @@ func TestFieldIterFromSegmentsDeduplicates(t *testing.T) {
func TestFieldIterFromSegmentsSomeEmpty(t *testing.T) {
segments := []segmentMetadata{
- {segment: newTestSegmentWithDocs(t, []doc.Document{
+ {segment: newTestSegmentWithDocs(t, []doc.Metadata{
{
ID: []byte("foo"),
Fields: []doc.Field{
@@ -89,7 +89,7 @@ func TestFieldIterFromSegmentsSomeEmpty(t *testing.T) {
},
},
})},
- {segment: newTestSegmentWithDocs(t, []doc.Document{})},
+ {segment: newTestSegmentWithDocs(t, []doc.Metadata{})},
}
iter, err := newFieldIterFromSegments(segments)
@@ -104,7 +104,7 @@ func TestFieldIterFromSegmentsSomeEmpty(t *testing.T) {
func TestFieldIterFromSegmentsIdentical(t *testing.T) {
segments := []segmentMetadata{
- {segment: newTestSegmentWithDocs(t, []doc.Document{
+ {segment: newTestSegmentWithDocs(t, []doc.Metadata{
{
ID: []byte("foo"),
Fields: []doc.Field{
@@ -113,7 +113,7 @@ func TestFieldIterFromSegmentsIdentical(t *testing.T) {
},
},
})},
- {segment: newTestSegmentWithDocs(t, []doc.Document{
+ {segment: newTestSegmentWithDocs(t, []doc.Metadata{
{
ID: []byte("bar"),
Fields: []doc.Field{
@@ -151,7 +151,7 @@ func assertIterValues(
func newTestSegmentWithDocs(
t *testing.T,
- docs []doc.Document,
+ docs []doc.Metadata,
) segment.Segment {
seg, err := mem.NewSegment(testMemOptions)
require.NoError(t, err)
diff --git a/src/m3ninx/index/segment/builder/multi_segments_field_postings_list_iter_test.go b/src/m3ninx/index/segment/builder/multi_segments_field_postings_list_iter_test.go
index 651a4ed346..9e781d3815 100644
--- a/src/m3ninx/index/segment/builder/multi_segments_field_postings_list_iter_test.go
+++ b/src/m3ninx/index/segment/builder/multi_segments_field_postings_list_iter_test.go
@@ -32,7 +32,8 @@ import (
func TestFieldPostingsListIterFromSegments(t *testing.T) {
segments := []segment.Segment{
- newTestSegmentWithDocs(t, []doc.Document{
+ // nolint: dupl
+ newTestSegmentWithDocs(t, []doc.Metadata{
{
ID: []byte("bux_0"),
Fields: []doc.Field{
@@ -50,7 +51,8 @@ func TestFieldPostingsListIterFromSegments(t *testing.T) {
},
},
}),
- newTestSegmentWithDocs(t, []doc.Document{
+ // nolint: dupl
+ newTestSegmentWithDocs(t, []doc.Metadata{
{
ID: []byte("foo_0"),
Fields: []doc.Field{
@@ -68,7 +70,7 @@ func TestFieldPostingsListIterFromSegments(t *testing.T) {
},
},
}),
- newTestSegmentWithDocs(t, []doc.Document{
+ newTestSegmentWithDocs(t, []doc.Metadata{
{
ID: []byte("bar_1"),
Fields: []doc.Field{
@@ -127,7 +129,7 @@ func TestFieldPostingsListIterFromSegments(t *testing.T) {
func checkIfFieldExistsInDoc(
field []byte,
- doc doc.Document,
+ doc doc.Metadata,
) bool {
found := false
for _, f := range doc.Fields {
diff --git a/src/m3ninx/index/segment/builder/multi_segments_terms_iter_test.go b/src/m3ninx/index/segment/builder/multi_segments_terms_iter_test.go
index 65219d12c5..99bc8c225d 100644
--- a/src/m3ninx/index/segment/builder/multi_segments_terms_iter_test.go
+++ b/src/m3ninx/index/segment/builder/multi_segments_terms_iter_test.go
@@ -35,7 +35,7 @@ import (
func TestTermsIterFromSegmentsDeduplicates(t *testing.T) {
segments := []segment.Segment{
- newTestSegmentWithDocs(t, []doc.Document{
+ newTestSegmentWithDocs(t, []doc.Metadata{
{
ID: []byte("foo"),
Fields: []doc.Field{
@@ -44,7 +44,7 @@ func TestTermsIterFromSegmentsDeduplicates(t *testing.T) {
},
},
}),
- newTestSegmentWithDocs(t, []doc.Document{
+ newTestSegmentWithDocs(t, []doc.Metadata{
{
ID: []byte("bar"),
Fields: []doc.Field{
@@ -93,7 +93,7 @@ func TestTermsIterFromSegmentsDeduplicates(t *testing.T) {
func assertTermsPostings(
t *testing.T,
- docs []doc.Document,
+ docs []doc.Metadata,
iter segment.TermsIterator,
expected termPostings,
) {
@@ -103,7 +103,7 @@ func assertTermsPostings(
termsPostingsString(docs, actual)))
}
-func termsPostingsString(docs []doc.Document, tp termPostings) string {
+func termsPostingsString(docs []doc.Metadata, tp termPostings) string {
str := strings.Builder{}
for k, ids := range tp {
str.WriteString(k)
diff --git a/src/m3ninx/index/segment/builder/postings_map_new.go b/src/m3ninx/index/segment/builder/postings_map_new.go
index 5fb871a420..f986d1345b 100644
--- a/src/m3ninx/index/segment/builder/postings_map_new.go
+++ b/src/m3ninx/index/segment/builder/postings_map_new.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2020 Uber Technologies, Inc.
+// Copyright (c) 2021 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
diff --git a/src/m3ninx/index/segment/fst/encoding/docs/data.go b/src/m3ninx/index/segment/fst/encoding/docs/data.go
index 08bfe1b86a..b702cfacfb 100644
--- a/src/m3ninx/index/segment/fst/encoding/docs/data.go
+++ b/src/m3ninx/index/segment/fst/encoding/docs/data.go
@@ -44,7 +44,7 @@ func NewDataWriter(w io.Writer) *DataWriter {
}
}
-func (w *DataWriter) Write(d doc.Document) (int, error) {
+func (w *DataWriter) Write(d doc.Metadata) (int, error) {
n := w.enc.PutBytes(d.ID)
n += w.enc.PutUvarint(uint64(len(d.Fields)))
for _, f := range d.Fields {
@@ -90,23 +90,23 @@ func NewDataReader(data []byte) *DataReader {
}
}
-func (r *DataReader) Read(offset uint64) (doc.Document, error) {
+func (r *DataReader) Read(offset uint64) (doc.Metadata, error) {
if offset >= uint64(len(r.data)) {
- return doc.Document{}, fmt.Errorf("invalid offset: %v is past the end of the data file", offset)
+ return doc.Metadata{}, fmt.Errorf("invalid offset: %v is past the end of the data file", offset)
}
dec := encoding.NewDecoder(r.data[int(offset):])
id, err := dec.Bytes()
if err != nil {
- return doc.Document{}, err
+ return doc.Metadata{}, err
}
x, err := dec.Uvarint()
if err != nil {
- return doc.Document{}, err
+ return doc.Metadata{}, err
}
n := int(x)
- d := doc.Document{
+ d := doc.Metadata{
ID: id,
Fields: make([]doc.Field, n),
}
@@ -114,11 +114,11 @@ func (r *DataReader) Read(offset uint64) (doc.Document, error) {
for i := 0; i < n; i++ {
name, err := dec.Bytes()
if err != nil {
- return doc.Document{}, err
+ return doc.Metadata{}, err
}
val, err := dec.Bytes()
if err != nil {
- return doc.Document{}, err
+ return doc.Metadata{}, err
}
d.Fields[i] = doc.Field{
Name: name,
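
A round-trip sketch of the encoder pair above. It assumes Write flushes each encoded document straight to the underlying writer, so the first document written to an empty buffer sits at offset 0; that layout is inferred from the Read implementation, not stated in this diff.

```go
func dataRoundTripSketch() {
	var buf bytes.Buffer
	w := docs.NewDataWriter(&buf)
	if _, err := w.Write(doc.Metadata{
		ID:     []byte("831992"),
		Fields: []doc.Field{{Name: []byte("fruit"), Value: []byte("apple")}},
	}); err != nil {
		panic(err)
	}

	r := docs.NewDataReader(buf.Bytes())
	m, err := r.Read(0) // offset of the first document (assumed layout)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(m.ID)) // 831992
}
```
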
diff --git a/src/m3ninx/index/segment/fst/encoding/docs/data_test.go b/src/m3ninx/index/segment/fst/encoding/docs/data_test.go
index 4e65dd7c76..1257b406c6 100644
--- a/src/m3ninx/index/segment/fst/encoding/docs/data_test.go
+++ b/src/m3ninx/index/segment/fst/encoding/docs/data_test.go
@@ -33,40 +33,40 @@ import (
func TestStoredFieldsData(t *testing.T) {
tests := []struct {
name string
- docs []doc.Document
+ docs []doc.Metadata
}{
{
name: "empty document",
- docs: []doc.Document{
- doc.Document{
+ docs: []doc.Metadata{
+ {
Fields: doc.Fields{},
},
},
},
{
name: "standard documents",
- docs: []doc.Document{
- doc.Document{
+ docs: []doc.Metadata{
+ {
ID: []byte("831992"),
Fields: []doc.Field{
- doc.Field{
+ {
Name: []byte("fruit"),
Value: []byte("apple"),
},
- doc.Field{
+ {
Name: []byte("color"),
Value: []byte("red"),
},
},
},
- doc.Document{
+ {
ID: []byte("080392"),
Fields: []doc.Field{
- doc.Field{
+ {
Name: []byte("fruit"),
Value: []byte("banana"),
},
- doc.Field{
+ {
Name: []byte("color"),
Value: []byte("yellow"),
},
diff --git a/src/m3ninx/index/segment/fst/encoding/docs/slice.go b/src/m3ninx/index/segment/fst/encoding/docs/slice.go
index ccf7fd48b6..20df8c8a5e 100644
--- a/src/m3ninx/index/segment/fst/encoding/docs/slice.go
+++ b/src/m3ninx/index/segment/fst/encoding/docs/slice.go
@@ -38,11 +38,11 @@ var _ index.DocRetriever = (*SliceReader)(nil)
// SliceReader is a docs slice reader for use with documents
// stored in memory.
type SliceReader struct {
- docs []doc.Document
+ docs []doc.Metadata
}
// NewSliceReader returns a new docs slice reader.
-func NewSliceReader(docs []doc.Document) *SliceReader {
+func NewSliceReader(docs []doc.Metadata) *SliceReader {
return &SliceReader{docs: docs}
}
@@ -52,17 +52,17 @@ func (r *SliceReader) Len() int {
}
// Read returns a document from the docs slice reader.
-func (r *SliceReader) Read(id postings.ID) (doc.Document, error) {
+func (r *SliceReader) Read(id postings.ID) (doc.Metadata, error) {
idx := int(id)
if idx < 0 || idx >= len(r.docs) {
- return doc.Document{}, errDocNotFound
+ return doc.Metadata{}, errDocNotFound
}
return r.docs[idx], nil
}
// Doc implements DocRetriever and reads the document with postings ID.
-func (r *SliceReader) Doc(id postings.ID) (doc.Document, error) {
+func (r *SliceReader) Doc(id postings.ID) (doc.Metadata, error) {
return r.Read(id)
}
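
SliceReader usage after the rename (sketch; per the bounds check above, out-of-range reads return the package's errDocNotFound):

```go
func sliceReaderSketch() {
	r := docs.NewSliceReader([]doc.Metadata{
		{ID: []byte("a")},
		{ID: []byte("b")},
	})
	m, err := r.Read(postings.ID(1))          // {ID: []byte("b")}, nil
	_, errNotFound := r.Read(postings.ID(2))  // out of range: errDocNotFound
	_, _, _ = m, err, errNotFound
}
```
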
diff --git a/src/m3ninx/index/segment/fst/encoding/docs/types.go b/src/m3ninx/index/segment/fst/encoding/docs/types.go
index cf531d2608..3acf3c766d 100644
--- a/src/m3ninx/index/segment/fst/encoding/docs/types.go
+++ b/src/m3ninx/index/segment/fst/encoding/docs/types.go
@@ -31,7 +31,7 @@ type Reader interface {
// Len is the number of documents contained by the reader.
Len() int
// Read reads a document with the given postings ID.
- Read(id postings.ID) (doc.Document, error)
+ Read(id postings.ID) (doc.Metadata, error)
// Iter returns a document iterator.
Iter() index.IDDocIterator
}
diff --git a/src/m3ninx/index/segment/fst/segment.go b/src/m3ninx/index/segment/fst/segment.go
index 7d0efdd0a5..37091f6965 100644
--- a/src/m3ninx/index/segment/fst/segment.go
+++ b/src/m3ninx/index/segment/fst/segment.go
@@ -579,11 +579,11 @@ func (r *fsSegment) matchAllNotClosedMaybeFinalizedWithRLock() (postings.Mutable
return pl, nil
}
-func (r *fsSegment) docNotClosedMaybeFinalizedWithRLock(id postings.ID) (doc.Document, error) {
+func (r *fsSegment) docNotClosedMaybeFinalizedWithRLock(id postings.ID) (doc.Metadata, error) {
// NB(r): Not closed, but could be finalized (i.e. a closed segment reader
// calling match field after this segment is finalized).
if r.finalized {
- return doc.Document{}, errReaderFinalized
+ return doc.Metadata{}, errReaderFinalized
}
// If using docs slice reader, return from the in memory slice reader
@@ -593,7 +593,7 @@ func (r *fsSegment) docNotClosedMaybeFinalizedWithRLock(id postings.ID) (doc.Doc
offset, err := r.docsIndexReader.Read(id)
if err != nil {
- return doc.Document{}, err
+ return doc.Metadata{}, err
}
return r.docsDataReader.Read(offset)
@@ -896,9 +896,9 @@ func (sr *fsSegmentReader) MatchAll() (postings.MutableList, error) {
return pl, err
}
-func (sr *fsSegmentReader) Doc(id postings.ID) (doc.Document, error) {
+func (sr *fsSegmentReader) Doc(id postings.ID) (doc.Metadata, error) {
if sr.closed {
- return doc.Document{}, errReaderClosed
+ return doc.Metadata{}, errReaderClosed
}
// NB(r): We are allowed to call match field after Close is called on
// the segment but not after it is finalized.
diff --git a/src/m3ninx/index/segment/fst/writer_reader_test.go b/src/m3ninx/index/segment/fst/writer_reader_test.go
index 5fe5a60a36..2c785fabcf 100644
--- a/src/m3ninx/index/segment/fst/writer_reader_test.go
+++ b/src/m3ninx/index/segment/fst/writer_reader_test.go
@@ -40,39 +40,39 @@ import (
var (
testOptions = NewOptions()
- fewTestDocuments = []doc.Document{
- doc.Document{
+ fewTestDocuments = []doc.Metadata{
+ {
Fields: []doc.Field{
- doc.Field{
+ {
Name: []byte("fruit"),
Value: []byte("banana"),
},
- doc.Field{
+ {
Name: []byte("color"),
Value: []byte("yellow"),
},
},
},
- doc.Document{
+ {
Fields: []doc.Field{
- doc.Field{
+ {
Name: []byte("fruit"),
Value: []byte("apple"),
},
- doc.Field{
+ {
Name: []byte("color"),
Value: []byte("red"),
},
},
},
- doc.Document{
+ {
ID: []byte("42"),
Fields: []doc.Field{
- doc.Field{
+ {
Name: []byte("fruit"),
Value: []byte("pineapple"),
},
- doc.Field{
+ {
Name: []byte("color"),
Value: []byte("yellow"),
},
@@ -83,7 +83,7 @@ var (
testDocuments = []struct {
name string
- docs []doc.Document
+ docs []doc.Metadata
}{
{
name: "few documents",
@@ -101,7 +101,7 @@ type testSegmentCase struct {
expected, observed sgmt.Segment
}
-func newTestCases(t *testing.T, docs []doc.Document) []testSegmentCase {
+func newTestCases(t *testing.T, docs []doc.Metadata) []testSegmentCase {
memSeg, fstSeg := newTestSegments(t, docs)
fstWriter10Reader10 := newFSTSegmentWithVersion(t, memSeg, testOptions,
@@ -117,22 +117,22 @@ func newTestCases(t *testing.T, docs []doc.Document) []testSegmentCase {
Version{Major: 1, Minor: 1} /* reader version */)
return []testSegmentCase{
- testSegmentCase{ // mem sgmt v latest fst
+ { // mem sgmt v latest fst
name: "mem v fst",
expected: memSeg,
observed: fstSeg,
},
- testSegmentCase{ // mem sgmt v fst1.0
+ { // mem sgmt v fst1.0
name: "mem v fstWriter10Reader10",
expected: memSeg,
observed: fstWriter10Reader10,
},
- testSegmentCase{ // mem sgmt v fst (WriterV1.1; ReaderV1.0) -- i.e. ensure forward compatibility
+ { // mem sgmt v fst (WriterV1.1; ReaderV1.0) -- i.e. ensure forward compatibility
name: "mem v fstWriter11Reader10",
expected: memSeg,
observed: fstWriter11Reader10,
},
- testSegmentCase{ // mem sgmt v fst (WriterV1.1; ReaderV1.1)
+ { // mem sgmt v fst (WriterV1.1; ReaderV1.1)
name: "mem v fstWriter11Reader11",
expected: memSeg,
observed: fstWriter11Reader11,
@@ -568,7 +568,7 @@ func TestSegmentReaderValidUntilClose(t *testing.T) {
require.Error(t, err)
}
-func newTestSegments(t *testing.T, docs []doc.Document) (memSeg sgmt.MutableSegment, fstSeg sgmt.Segment) {
+func newTestSegments(t *testing.T, docs []doc.Metadata) (memSeg sgmt.MutableSegment, fstSeg sgmt.Segment) {
s := newTestMemSegment(t)
for _, d := range docs {
_, err := s.Insert(d)
@@ -647,8 +647,8 @@ func assertPostingsList(t *testing.T, l postings.List, exp []postings.ID) {
require.Fail(t, msg)
}
-func collectDocs(iter doc.Iterator) ([]doc.Document, error) {
- var docs []doc.Document
+func collectDocs(iter doc.Iterator) ([]doc.Metadata, error) {
+ var docs []doc.Metadata
for iter.Next() {
docs = append(docs, iter.Current())
}
diff --git a/src/m3ninx/index/segment/mem/fields_map_new.go b/src/m3ninx/index/segment/mem/fields_map_new.go
index c13cc1737e..598f09e756 100644
--- a/src/m3ninx/index/segment/mem/fields_map_new.go
+++ b/src/m3ninx/index/segment/mem/fields_map_new.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2020 Uber Technologies, Inc.
+// Copyright (c) 2021 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
diff --git a/src/m3ninx/index/segment/mem/mem_mock.go b/src/m3ninx/index/segment/mem/mem_mock.go
index af7483beb4..a51c4f759a 100644
--- a/src/m3ninx/index/segment/mem/mem_mock.go
+++ b/src/m3ninx/index/segment/mem/mem_mock.go
@@ -104,10 +104,10 @@ func (mr *MockReadableSegmentMockRecorder) Terms(arg0 interface{}) *gomock.Call
}
// getDoc mocks base method
-func (m *MockReadableSegment) getDoc(arg0 postings.ID) (doc.Document, error) {
+func (m *MockReadableSegment) getDoc(arg0 postings.ID) (doc.Metadata, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "getDoc", arg0)
- ret0, _ := ret[0].(doc.Document)
+ ret0, _ := ret[0].(doc.Metadata)
ret1, _ := ret[1].(error)
return ret0, ret1
}
diff --git a/src/m3ninx/index/segment/mem/merge_test.go b/src/m3ninx/index/segment/mem/merge_test.go
index 0a89b41112..8323863bcd 100644
--- a/src/m3ninx/index/segment/mem/merge_test.go
+++ b/src/m3ninx/index/segment/mem/merge_test.go
@@ -30,41 +30,41 @@ import (
)
func TestMemSegmentMerge(t *testing.T) {
- docs := []doc.Document{
- doc.Document{
+ docs := []doc.Metadata{
+ {
ID: []byte("abc"),
Fields: []doc.Field{
- doc.Field{
+ {
Name: []byte("fruit"),
Value: []byte("banana"),
},
- doc.Field{
+ {
Name: []byte("color"),
Value: []byte("yellow"),
},
},
},
- doc.Document{
+ {
ID: []byte("cde"),
Fields: []doc.Field{
- doc.Field{
+ {
Name: []byte("fruit"),
Value: []byte("apple"),
},
- doc.Field{
+ {
Name: []byte("color"),
Value: []byte("red"),
},
},
},
- doc.Document{
+ {
ID: []byte("dfg"),
Fields: []doc.Field{
- doc.Field{
+ {
Name: []byte("fruit"),
Value: []byte("pineapple"),
},
- doc.Field{
+ {
Name: []byte("color"),
Value: []byte("yellow"),
},
@@ -102,7 +102,7 @@ func TestMemSegmentMerge(t *testing.T) {
require.NoError(t, reader.Close())
}
-func assertReaderHasDoc(t *testing.T, r index.Reader, d doc.Document) {
+func assertReaderHasDoc(t *testing.T, r index.Reader, d doc.Metadata) {
iter, err := r.AllDocs()
require.NoError(t, err)
found := false
diff --git a/src/m3ninx/index/segment/mem/reader.go b/src/m3ninx/index/segment/mem/reader.go
index 385bc24e4a..35993ff0f7 100644
--- a/src/m3ninx/index/segment/mem/reader.go
+++ b/src/m3ninx/index/segment/mem/reader.go
@@ -124,15 +124,15 @@ func (r *reader) MatchAll() (postings.MutableList, error) {
return pl, nil
}
-func (r *reader) Doc(id postings.ID) (doc.Document, error) {
+func (r *reader) Doc(id postings.ID) (doc.Metadata, error) {
r.RLock()
defer r.RUnlock()
if r.closed {
- return doc.Document{}, errSegmentReaderClosed
+ return doc.Metadata{}, errSegmentReaderClosed
}
if id < r.limits.startInclusive || id >= r.limits.endExclusive {
- return doc.Document{}, index.ErrDocNotFound
+ return doc.Metadata{}, index.ErrDocNotFound
}
return r.segment.getDoc(id)
diff --git a/src/m3ninx/index/segment/mem/reader_test.go b/src/m3ninx/index/segment/mem/reader_test.go
index 74abcb36fd..cc3ab7e9b8 100644
--- a/src/m3ninx/index/segment/mem/reader_test.go
+++ b/src/m3ninx/index/segment/mem/reader_test.go
@@ -29,7 +29,7 @@ import (
"github.com/m3db/m3/src/m3ninx/postings"
"github.com/m3db/m3/src/m3ninx/postings/roaring"
- gomock "github.com/golang/mock/gomock"
+ "github.com/golang/mock/gomock"
"github.com/stretchr/testify/require"
)
@@ -115,18 +115,18 @@ func TestReaderDocs(t *testing.T) {
defer mockCtrl.Finish()
maxID := postings.ID(50)
- docs := []doc.Document{
- doc.Document{
+ docs := []doc.Metadata{
+ {
Fields: []doc.Field{
- doc.Field{
+ {
Name: []byte("apple"),
Value: []byte("red"),
},
},
},
- doc.Document{
+ {
Fields: []doc.Field{
- doc.Field{
+ {
Name: []byte("banana"),
Value: []byte("yellow"),
},
@@ -150,7 +150,7 @@ func TestReaderDocs(t *testing.T) {
iter, err := reader.Docs(postingsList)
require.NoError(t, err)
- actualDocs := make([]doc.Document, 0, len(docs))
+ actualDocs := make([]doc.Metadata, 0, len(docs))
for iter.Next() {
actualDocs = append(actualDocs, iter.Current())
}
@@ -168,18 +168,18 @@ func TestReaderAllDocs(t *testing.T) {
defer mockCtrl.Finish()
maxID := postings.ID(2)
- docs := []doc.Document{
- doc.Document{
+ docs := []doc.Metadata{
+ {
Fields: []doc.Field{
- doc.Field{
+ {
Name: []byte("apple"),
Value: []byte("red"),
},
},
},
- doc.Document{
+ {
Fields: []doc.Field{
- doc.Field{
+ {
Name: []byte("banana"),
Value: []byte("yellow"),
},
@@ -197,7 +197,7 @@ func TestReaderAllDocs(t *testing.T) {
iter, err := reader.AllDocs()
require.NoError(t, err)
- actualDocs := make([]doc.Document, 0, len(docs))
+ actualDocs := make([]doc.Metadata, 0, len(docs))
for iter.Next() {
actualDocs = append(actualDocs, iter.Current())
}
diff --git a/src/m3ninx/index/segment/mem/segment.go b/src/m3ninx/index/segment/mem/segment.go
index 9a45d1b2ec..3c47c047da 100644
--- a/src/m3ninx/index/segment/mem/segment.go
+++ b/src/m3ninx/index/segment/mem/segment.go
@@ -52,7 +52,7 @@ type memSegment struct {
// Mapping of postings ID to document.
docs struct {
sync.RWMutex
- data []doc.Document
+ data []doc.Metadata
}
// Mapping of term to postings list.
@@ -76,7 +76,7 @@ func NewSegment(opts Options) (segment.MutableSegment, error) {
readerID: postings.NewAtomicID(0),
}
- s.docs.data = make([]doc.Document, opts.InitialCapacity())
+ s.docs.data = make([]doc.Metadata, opts.InitialCapacity())
s.writer.idSet = newIDsMap(256)
s.writer.nextID = 0
@@ -100,7 +100,7 @@ func (s *memSegment) Reset() {
s.termsDict.Reset()
s.readerID = postings.NewAtomicID(0)
- var empty doc.Document
+ var empty doc.Metadata
for i := range s.docs.data {
s.docs.data[i] = empty
}
@@ -121,7 +121,7 @@ func (s *memSegment) Size() int64 {
return size
}
-func (s *memSegment) Docs() []doc.Document {
+func (s *memSegment) Docs() []doc.Metadata {
s.state.RLock()
defer s.state.RUnlock()
@@ -159,7 +159,7 @@ func (s *memSegment) ContainsField(f []byte) (bool, error) {
return contains, nil
}
-func (s *memSegment) Insert(d doc.Document) ([]byte, error) {
+func (s *memSegment) Insert(d doc.Metadata) ([]byte, error) {
s.state.RLock()
defer s.state.RUnlock()
if s.state.closed {
@@ -170,7 +170,7 @@ func (s *memSegment) Insert(d doc.Document) ([]byte, error) {
s.writer.Lock()
defer s.writer.Unlock()
- b := index.NewBatch([]doc.Document{d})
+ b := index.NewBatch([]doc.Metadata{d})
b.AllowPartialUpdates = false
if err := s.prepareDocsWithLocks(b, nil); err != nil {
return nil, err
@@ -237,7 +237,7 @@ func (s *memSegment) prepareDocsWithLocks(
) error {
s.writer.idSet.Reset()
- var emptyDoc doc.Document
+ var emptyDoc doc.Metadata
for i := 0; i < len(b.Docs); i++ {
d := b.Docs[i]
if err := d.Validate(); err != nil {
@@ -293,7 +293,7 @@ func (s *memSegment) prepareDocsWithLocks(
// insertDocWithLocks inserts a document into the index. It must be called with the
// state and writer locks.
-func (s *memSegment) insertDocWithLocks(d doc.Document) error {
+func (s *memSegment) insertDocWithLocks(d doc.Metadata) error {
nextID := s.writer.nextID
s.storeDocWithStateLock(nextID, d)
s.writer.nextID++
@@ -302,7 +302,7 @@ func (s *memSegment) insertDocWithLocks(d doc.Document) error {
// indexDocWithStateLock indexes the fields of a document in the segment's terms
// dictionary. It must be called with the segment's state lock.
-func (s *memSegment) indexDocWithStateLock(id postings.ID, d doc.Document) error {
+func (s *memSegment) indexDocWithStateLock(id postings.ID, d doc.Metadata) error {
for _, f := range d.Fields {
if err := s.termsDict.Insert(f, id); err != nil {
return err
@@ -316,7 +316,7 @@ func (s *memSegment) indexDocWithStateLock(id postings.ID, d doc.Document) error
// storeDocWithStateLock stores a document into the segment's mapping of postings
// IDs to documents. It must be called with the segment's state lock.
-func (s *memSegment) storeDocWithStateLock(id postings.ID, d doc.Document) {
+func (s *memSegment) storeDocWithStateLock(id postings.ID, d doc.Metadata) {
idx := int(id)
// Can return early if we have sufficient capacity.
@@ -346,7 +346,7 @@ func (s *memSegment) storeDocWithStateLock(id postings.ID, d doc.Document) {
return
}
- data := make([]doc.Document, 2*(size+1))
+ data := make([]doc.Metadata, 2*(size+1))
copy(data, s.docs.data)
s.docs.data = data
s.docs.data[idx] = d
@@ -396,11 +396,11 @@ func (s *memSegment) matchRegexp(field []byte, compiled *re.Regexp) (postings.Li
return s.termsDict.MatchRegexp(field, compiled), nil
}
-func (s *memSegment) getDoc(id postings.ID) (doc.Document, error) {
+func (s *memSegment) getDoc(id postings.ID) (doc.Metadata, error) {
s.state.RLock()
defer s.state.RUnlock()
if s.state.closed {
- return doc.Document{}, segment.ErrClosed
+ return doc.Metadata{}, segment.ErrClosed
}
idx := int(id)
@@ -408,7 +408,7 @@ func (s *memSegment) getDoc(id postings.ID) (doc.Document, error) {
s.docs.RLock()
if idx >= len(s.docs.data) {
s.docs.RUnlock()
- return doc.Document{}, index.ErrDocNotFound
+ return doc.Metadata{}, index.ErrDocNotFound
}
d := s.docs.data[idx]
s.docs.RUnlock()
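
Note on storeDocWithStateLock above: when a postings ID lands beyond the current docs slice, the segment grows the backing array geometrically (2*(size+1)) and copies the old contents over, keeping repeated inserts amortized O(1). A condensed sketch of that grow-and-copy step, assuming data and idx as in the hunk:

    if idx >= len(data) {
        grown := make([]doc.Metadata, 2*(len(data)+1))
        copy(grown, data)
        data = grown
    }
    data[idx] = d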
diff --git a/src/m3ninx/index/segment/mem/segment_bench_test.go b/src/m3ninx/index/segment/mem/segment_bench_test.go
index ee344f17a9..65b92276f8 100644
--- a/src/m3ninx/index/segment/mem/segment_bench_test.go
+++ b/src/m3ninx/index/segment/mem/segment_bench_test.go
@@ -37,7 +37,7 @@ var (
func BenchmarkSegment(b *testing.B) {
benchmarks := []struct {
name string
- fn func(docs []doc.Document, b *testing.B)
+ fn func(docs []doc.Metadata, b *testing.B)
}{
{
name: "benchmark Insert with segment",
@@ -65,7 +65,7 @@ func BenchmarkSegment(b *testing.B) {
}
}
-func benchmarkInsertSegment(docs []doc.Document, b *testing.B) {
+func benchmarkInsertSegment(docs []doc.Metadata, b *testing.B) {
b.ReportAllocs()
for n := 0; n < b.N; n++ {
@@ -82,7 +82,7 @@ func benchmarkInsertSegment(docs []doc.Document, b *testing.B) {
}
}
-func benchmarkMatchTermSegment(docs []doc.Document, b *testing.B) {
+func benchmarkMatchTermSegment(docs []doc.Metadata, b *testing.B) {
b.ReportAllocs()
sgmt, err := NewSegment(NewOptions())
@@ -104,7 +104,7 @@ func benchmarkMatchTermSegment(docs []doc.Document, b *testing.B) {
}
}
-func benchmarkMatchRegexSegment(docs []doc.Document, b *testing.B) {
+func benchmarkMatchRegexSegment(docs []doc.Metadata, b *testing.B) {
b.ReportAllocs()
sgmt, err := NewSegment(NewOptions())
diff --git a/src/m3ninx/index/segment/mem/segment_test.go b/src/m3ninx/index/segment/mem/segment_test.go
index 60137213ca..3ec0aebb00 100644
--- a/src/m3ninx/index/segment/mem/segment_test.go
+++ b/src/m3ninx/index/segment/mem/segment_test.go
@@ -34,39 +34,39 @@ import (
var (
testOptions = NewOptions()
- testDocuments = []doc.Document{
- doc.Document{
+ testDocuments = []doc.Metadata{
+ {
Fields: []doc.Field{
- doc.Field{
+ {
Name: []byte("fruit"),
Value: []byte("banana"),
},
- doc.Field{
+ {
Name: []byte("color"),
Value: []byte("yellow"),
},
},
},
- doc.Document{
+ {
Fields: []doc.Field{
- doc.Field{
+ {
Name: []byte("fruit"),
Value: []byte("apple"),
},
- doc.Field{
+ {
Name: []byte("color"),
Value: []byte("red"),
},
},
},
- doc.Document{
+ {
ID: []byte("42"),
Fields: []doc.Field{
- doc.Field{
+ {
Name: []byte("fruit"),
Value: []byte("pineapple"),
},
- doc.Field{
+ {
Name: []byte("color"),
Value: []byte("yellow"),
},
@@ -78,13 +78,13 @@ var (
func TestSegmentInsert(t *testing.T) {
tests := []struct {
name string
- input doc.Document
+ input doc.Metadata
}{
{
name: "document without an ID",
- input: doc.Document{
+ input: doc.Metadata{
Fields: []doc.Field{
- doc.Field{
+ {
Name: []byte("apple"),
Value: []byte("red"),
},
@@ -93,10 +93,10 @@ func TestSegmentInsert(t *testing.T) {
},
{
name: "document with an ID",
- input: doc.Document{
+ input: doc.Metadata{
ID: []byte("123"),
Fields: []doc.Field{
- doc.Field{
+ {
Name: []byte("apple"),
Value: []byte("red"),
},
@@ -145,23 +145,23 @@ func TestSegmentInsert(t *testing.T) {
func TestSegmentInsertDuplicateID(t *testing.T) {
var (
id = []byte("123")
- first = doc.Document{
+ first = doc.Metadata{
ID: id,
Fields: []doc.Field{
- doc.Field{
+ {
Name: []byte("apple"),
Value: []byte("red"),
},
},
}
- second = doc.Document{
+ second = doc.Metadata{
ID: id,
Fields: []doc.Field{
- doc.Field{
+ {
Name: []byte("apple"),
Value: []byte("red"),
},
- doc.Field{
+ {
Name: []byte("variety"),
Value: []byte("fuji"),
},
@@ -212,27 +212,27 @@ func TestSegmentInsertBatch(t *testing.T) {
{
name: "valid batch",
input: index.NewBatch(
- []doc.Document{
- doc.Document{
+ []doc.Metadata{
+ {
Fields: []doc.Field{
- doc.Field{
+ {
Name: []byte("fruit"),
Value: []byte("apple"),
},
- doc.Field{
+ {
Name: []byte("color"),
Value: []byte("red"),
},
},
},
- doc.Document{
+ {
ID: []byte("831992"),
Fields: []doc.Field{
- doc.Field{
+ {
Name: []byte("fruit"),
Value: []byte("banana"),
},
- doc.Field{
+ {
Name: []byte("color"),
Value: []byte("yellow"),
},
@@ -274,26 +274,26 @@ func TestSegmentInsertBatchError(t *testing.T) {
{
name: "invalid document",
input: index.NewBatch(
- []doc.Document{
- doc.Document{
+ []doc.Metadata{
+ {
Fields: []doc.Field{
- doc.Field{
+ {
Name: []byte("fruit"),
Value: []byte("apple"),
},
- doc.Field{
+ {
Name: []byte("color\xff"),
Value: []byte("red"),
},
},
},
- doc.Document{
+ {
Fields: []doc.Field{
- doc.Field{
+ {
Name: []byte("fruit"),
Value: []byte("banana"),
},
- doc.Field{
+ {
Name: []byte("color"),
Value: []byte("yellow"),
},
@@ -326,27 +326,27 @@ func TestSegmentInsertBatchPartialError(t *testing.T) {
{
name: "invalid document",
input: index.NewBatch(
- []doc.Document{
- doc.Document{
+ []doc.Metadata{
+ {
Fields: []doc.Field{
- doc.Field{
+ {
Name: []byte("fruit"),
Value: []byte("apple"),
},
- doc.Field{
+ {
Name: []byte("color\xff"),
Value: []byte("red"),
},
},
},
- doc.Document{
+ {
Fields: []doc.Field{
- doc.Field{
+ {
Name: []byte("fruit"),
Value: []byte("banana"),
},
- doc.Field{
+ {
Name: []byte("color"),
Value: []byte("yellow"),
},
@@ -359,28 +359,28 @@ func TestSegmentInsertBatchPartialError(t *testing.T) {
{
name: "duplicate ID",
input: index.NewBatch(
- []doc.Document{
- doc.Document{
+ []doc.Metadata{
+ {
ID: []byte("831992"),
Fields: []doc.Field{
- doc.Field{
+ {
Name: []byte("fruit"),
Value: []byte("apple"),
},
- doc.Field{
+ {
Name: []byte("color"),
Value: []byte("red"),
},
},
},
- doc.Document{
+ {
ID: []byte("831992"),
Fields: []doc.Field{
- doc.Field{
+ {
Name: []byte("fruit"),
Value: []byte("banana"),
},
- doc.Field{
+ {
Name: []byte("color"),
Value: []byte("yellow"),
},
@@ -430,28 +430,28 @@ func TestSegmentInsertBatchPartialError(t *testing.T) {
func TestSegmentInsertBatchPartialErrorInvalidDoc(t *testing.T) {
b1 := index.NewBatch(
- []doc.Document{
- doc.Document{
+ []doc.Metadata{
+ {
ID: []byte("abc"),
Fields: []doc.Field{
- doc.Field{
+ {
Name: []byte("fruit"),
Value: []byte("apple"),
},
- doc.Field{
+ {
Name: []byte("color\xff"),
Value: []byte("red"),
},
},
},
- doc.Document{
+ {
ID: []byte("abc"),
Fields: []doc.Field{
- doc.Field{
+ {
Name: []byte("fruit"),
Value: []byte("banana"),
},
- doc.Field{
+ {
Name: []byte("color"),
Value: []byte("yellow"),
},
@@ -484,28 +484,28 @@ func TestSegmentInsertBatchPartialErrorInvalidDoc(t *testing.T) {
func TestSegmentContainsID(t *testing.T) {
b1 := index.NewBatch(
- []doc.Document{
- doc.Document{
+ []doc.Metadata{
+ {
ID: []byte("abc"),
Fields: []doc.Field{
- doc.Field{
+ {
Name: []byte("fruit"),
Value: []byte("apple"),
},
- doc.Field{
+ {
Name: []byte("color\xff"),
Value: []byte("red"),
},
},
},
- doc.Document{
+ {
ID: []byte("abc"),
Fields: []doc.Field{
- doc.Field{
+ {
Name: []byte("fruit"),
Value: []byte("banana"),
},
- doc.Field{
+ {
Name: []byte("color"),
Value: []byte("yellow"),
},
@@ -544,28 +544,28 @@ func TestSegmentContainsID(t *testing.T) {
}
func TestSegmentContainsField(t *testing.T) {
- docs := []doc.Document{
- doc.Document{
+ docs := []doc.Metadata{
+ {
ID: []byte("abc"),
Fields: []doc.Field{
- doc.Field{
+ {
Name: []byte("fruit"),
Value: []byte("apple"),
},
- doc.Field{
+ {
Name: []byte("colour"),
Value: []byte("red"),
},
},
},
- doc.Document{
+ {
ID: []byte("cde"),
Fields: []doc.Field{
- doc.Field{
+ {
Name: []byte("fruit"),
Value: []byte("banana"),
},
- doc.Field{
+ {
Name: []byte("color"),
Value: []byte("yellow"),
},
@@ -589,15 +589,15 @@ func TestSegmentContainsField(t *testing.T) {
func TestSegmentInsertBatchPartialErrorAlreadyIndexing(t *testing.T) {
b1 := index.NewBatch(
- []doc.Document{
- doc.Document{
+ []doc.Metadata{
+ {
ID: []byte("abc"),
Fields: []doc.Field{
- doc.Field{
+ {
Name: []byte("fruit"),
Value: []byte("apple"),
},
- doc.Field{
+ {
Name: []byte("color"),
Value: []byte("red"),
},
@@ -607,33 +607,33 @@ func TestSegmentInsertBatchPartialErrorAlreadyIndexing(t *testing.T) {
index.AllowPartialUpdates())
b2 := index.NewBatch(
- []doc.Document{
- doc.Document{
+ []doc.Metadata{
+ {
ID: []byte("abc"),
Fields: []doc.Field{
- doc.Field{
+ {
Name: []byte("fruit"),
Value: []byte("apple"),
},
- doc.Field{
+ {
Name: []byte("color"),
Value: []byte("red"),
},
},
},
- doc.Document{
+ {
ID: []byte("cdef"),
Fields: []doc.Field{
- doc.Field{
+ {
Name: []byte("color"),
Value: []byte("blue"),
},
},
},
- doc.Document{
+ {
ID: []byte("cdef"),
Fields: []doc.Field{
- doc.Field{
+ {
Name: []byte("color"),
Value: []byte("blue"),
},
@@ -657,39 +657,39 @@ func TestSegmentInsertBatchPartialErrorAlreadyIndexing(t *testing.T) {
}
func TestSegmentReaderMatchExact(t *testing.T) {
- docs := []doc.Document{
- doc.Document{
+ docs := []doc.Metadata{
+ {
Fields: []doc.Field{
- doc.Field{
+ {
Name: []byte("fruit"),
Value: []byte("apple"),
},
- doc.Field{
+ {
Name: []byte("color"),
Value: []byte("red"),
},
},
},
- doc.Document{
+ {
ID: []byte("83"),
Fields: []doc.Field{
- doc.Field{
+ {
Name: []byte("fruit"),
Value: []byte("banana"),
},
- doc.Field{
+ {
Name: []byte("color"),
Value: []byte("yellow"),
},
},
},
- doc.Document{
+ {
Fields: []doc.Field{
- doc.Field{
+ {
Name: []byte("fruit"),
Value: []byte("apple"),
},
- doc.Field{
+ {
Name: []byte("color"),
Value: []byte("green"),
},
@@ -717,7 +717,7 @@ func TestSegmentReaderMatchExact(t *testing.T) {
iter, err := r.Docs(pl)
require.NoError(t, err)
- actualDocs := make([]doc.Document, 0)
+ actualDocs := make([]doc.Metadata, 0)
for iter.Next() {
actualDocs = append(actualDocs, iter.Current())
}
@@ -725,7 +725,7 @@ func TestSegmentReaderMatchExact(t *testing.T) {
require.NoError(t, iter.Err())
require.NoError(t, iter.Close())
- expectedDocs := []doc.Document{docs[0], docs[2]}
+ expectedDocs := []doc.Metadata{docs[0], docs[2]}
require.Equal(t, len(expectedDocs), len(actualDocs))
for i := range actualDocs {
require.True(t, compareDocs(expectedDocs[i], actualDocs[i]))
@@ -848,7 +848,7 @@ func TestSegmentReaderMatchRegex(t *testing.T) {
iter, err := r.Docs(pl)
require.NoError(t, err)
- actualDocs := make([]doc.Document, 0)
+ actualDocs := make([]doc.Metadata, 0)
for iter.Next() {
actualDocs = append(actualDocs, iter.Current())
}
@@ -856,7 +856,7 @@ func TestSegmentReaderMatchRegex(t *testing.T) {
require.NoError(t, iter.Err())
require.NoError(t, iter.Close())
- expectedDocs := []doc.Document{docs[1], docs[2]}
+ expectedDocs := []doc.Metadata{docs[1], docs[2]}
require.Equal(t, len(expectedDocs), len(actualDocs))
for i := range actualDocs {
require.True(t, compareDocs(expectedDocs[i], actualDocs[i]))
@@ -866,7 +866,7 @@ func TestSegmentReaderMatchRegex(t *testing.T) {
require.NoError(t, segment.Close())
}
-func testDocument(t *testing.T, d doc.Document, r index.Reader) {
+func testDocument(t *testing.T, d doc.Metadata, r index.Reader) {
for _, f := range d.Fields {
name, value := f.Name, f.Value
pl, err := r.MatchTerm(name, value)
@@ -893,7 +893,7 @@ func testDocument(t *testing.T, d doc.Document, r index.Reader) {
// compareDocs returns whether two documents are equal. If the actual doc contains
// an ID but the expected doc does not then the ID is excluded from the comparison
// since it was auto-generated.
-func compareDocs(expected, actual doc.Document) bool {
+func compareDocs(expected, actual doc.Metadata) bool {
if actual.HasID() && !expected.HasID() {
actual.ID = nil
}
diff --git a/src/m3ninx/index/segment/mem/terms_dict_bench_test.go b/src/m3ninx/index/segment/mem/terms_dict_bench_test.go
index 010e49ed38..fb3d0934d2 100644
--- a/src/m3ninx/index/segment/mem/terms_dict_bench_test.go
+++ b/src/m3ninx/index/segment/mem/terms_dict_bench_test.go
@@ -38,7 +38,7 @@ var (
func BenchmarkTermsDict(b *testing.B) {
benchmarks := []struct {
name string
- fn func(docs []doc.Document, b *testing.B)
+ fn func(docs []doc.Metadata, b *testing.B)
}{
{
name: "benchmark Insert",
@@ -66,7 +66,7 @@ func BenchmarkTermsDict(b *testing.B) {
}
}
-func benchmarkTermsDictInsert(docs []doc.Document, b *testing.B) {
+func benchmarkTermsDictInsert(docs []doc.Metadata, b *testing.B) {
b.ReportAllocs()
for n := 0; n < b.N; n++ {
@@ -81,7 +81,7 @@ func benchmarkTermsDictInsert(docs []doc.Document, b *testing.B) {
}
}
-func benchmarkTermsDictMatchTerm(docs []doc.Document, b *testing.B) {
+func benchmarkTermsDictMatchTerm(docs []doc.Metadata, b *testing.B) {
b.ReportAllocs()
dict := newTermsDict(NewOptions())
@@ -101,7 +101,7 @@ func benchmarkTermsDictMatchTerm(docs []doc.Document, b *testing.B) {
}
}
-func benchmarkTermsDictMatchRegex(docs []doc.Document, b *testing.B) {
+func benchmarkTermsDictMatchRegex(docs []doc.Metadata, b *testing.B) {
b.ReportAllocs()
dict := newTermsDict(NewOptions())
diff --git a/src/m3ninx/index/segment/mem/types.go b/src/m3ninx/index/segment/mem/types.go
index 13bbea45fc..9a5268e0b3 100644
--- a/src/m3ninx/index/segment/mem/types.go
+++ b/src/m3ninx/index/segment/mem/types.go
@@ -74,5 +74,5 @@ type ReadableSegment interface {
Terms(field []byte) (sgmt.TermsIterator, error)
matchTerm(field, term []byte) (postings.List, error)
matchRegexp(field []byte, compiled *re.Regexp) (postings.List, error)
- getDoc(id postings.ID) (doc.Document, error)
+ getDoc(id postings.ID) (doc.Metadata, error)
}
diff --git a/src/m3ninx/index/segment/segment_mock.go b/src/m3ninx/index/segment/segment_mock.go
index 4721f195c2..3fc758bb7f 100644
--- a/src/m3ninx/index/segment/segment_mock.go
+++ b/src/m3ninx/index/segment/segment_mock.go
@@ -182,10 +182,10 @@ func (m *MockReader) EXPECT() *MockReaderMockRecorder {
}
// Doc mocks base method
-func (m *MockReader) Doc(id postings.ID) (doc.Document, error) {
+func (m *MockReader) Doc(id postings.ID) (doc.Metadata, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Doc", id)
- ret0, _ := ret[0].(doc.Document)
+ ret0, _ := ret[0].(doc.Metadata)
ret1, _ := ret[1].(error)
return ret0, ret1
}
@@ -1009,10 +1009,10 @@ func (mr *MockMutableSegmentMockRecorder) Reset() *gomock.Call {
}
// Docs mocks base method
-func (m *MockMutableSegment) Docs() []doc.Document {
+func (m *MockMutableSegment) Docs() []doc.Metadata {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Docs")
- ret0, _ := ret[0].([]doc.Document)
+ ret0, _ := ret[0].([]doc.Metadata)
return ret0
}
@@ -1038,7 +1038,7 @@ func (mr *MockMutableSegmentMockRecorder) AllDocs() *gomock.Call {
}
// Insert mocks base method
-func (m *MockMutableSegment) Insert(d doc.Document) ([]byte, error) {
+func (m *MockMutableSegment) Insert(d doc.Metadata) ([]byte, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Insert", d)
ret0, _ := ret[0].([]byte)
@@ -1339,10 +1339,10 @@ func (mr *MockBuilderMockRecorder) Reset() *gomock.Call {
}
// Docs mocks base method
-func (m *MockBuilder) Docs() []doc.Document {
+func (m *MockBuilder) Docs() []doc.Metadata {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Docs")
- ret0, _ := ret[0].([]doc.Document)
+ ret0, _ := ret[0].([]doc.Metadata)
return ret0
}
@@ -1433,10 +1433,10 @@ func (mr *MockDocumentsBuilderMockRecorder) Reset() *gomock.Call {
}
// Docs mocks base method
-func (m *MockDocumentsBuilder) Docs() []doc.Document {
+func (m *MockDocumentsBuilder) Docs() []doc.Metadata {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Docs")
- ret0, _ := ret[0].([]doc.Document)
+ ret0, _ := ret[0].([]doc.Metadata)
return ret0
}
@@ -1462,7 +1462,7 @@ func (mr *MockDocumentsBuilderMockRecorder) AllDocs() *gomock.Call {
}
// Insert mocks base method
-func (m *MockDocumentsBuilder) Insert(d doc.Document) ([]byte, error) {
+func (m *MockDocumentsBuilder) Insert(d doc.Metadata) ([]byte, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Insert", d)
ret0, _ := ret[0].([]byte)
@@ -1582,10 +1582,10 @@ func (mr *MockCloseableDocumentsBuilderMockRecorder) Reset() *gomock.Call {
}
// Docs mocks base method
-func (m *MockCloseableDocumentsBuilder) Docs() []doc.Document {
+func (m *MockCloseableDocumentsBuilder) Docs() []doc.Metadata {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Docs")
- ret0, _ := ret[0].([]doc.Document)
+ ret0, _ := ret[0].([]doc.Metadata)
return ret0
}
@@ -1611,7 +1611,7 @@ func (mr *MockCloseableDocumentsBuilderMockRecorder) AllDocs() *gomock.Call {
}
// Insert mocks base method
-func (m *MockCloseableDocumentsBuilder) Insert(d doc.Document) ([]byte, error) {
+func (m *MockCloseableDocumentsBuilder) Insert(d doc.Metadata) ([]byte, error) {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Insert", d)
ret0, _ := ret[0].([]byte)
@@ -1745,10 +1745,10 @@ func (mr *MockSegmentsBuilderMockRecorder) Reset() *gomock.Call {
}
// Docs mocks base method
-func (m *MockSegmentsBuilder) Docs() []doc.Document {
+func (m *MockSegmentsBuilder) Docs() []doc.Metadata {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "Docs")
- ret0, _ := ret[0].([]doc.Document)
+ ret0, _ := ret[0].([]doc.Metadata)
return ret0
}
diff --git a/src/m3ninx/index/segment/types.go b/src/m3ninx/index/segment/types.go
index 566a25bd00..0b176dc6de 100644
--- a/src/m3ninx/index/segment/types.go
+++ b/src/m3ninx/index/segment/types.go
@@ -190,7 +190,7 @@ type Builder interface {
// Docs returns the current docs slice, this is not safe to modify
// and is invalidated on a call to reset.
- Docs() []doc.Document
+ Docs() []doc.Metadata
// AllDocs returns an iterator over the documents known to the Reader.
AllDocs() (index.IDDocIterator, error)
diff --git a/src/m3ninx/index/types.go b/src/m3ninx/index/types.go
index 99a141fba6..99e4de0c6e 100644
--- a/src/m3ninx/index/types.go
+++ b/src/m3ninx/index/types.go
@@ -50,7 +50,7 @@ type Index interface {
type Writer interface {
// Insert inserts the given document into the index and returns its ID. The document
// is guaranteed to be searchable once the Insert method returns.
- Insert(d doc.Document) ([]byte, error)
+ Insert(d doc.Metadata) ([]byte, error)
// InsertBatch inserts a batch of metrics into the index. The documents are guaranteed
// to be searchable all at once when the Batch method returns. If the batch supports
@@ -97,7 +97,7 @@ type CompiledRegex struct {
// DocRetriever returns the document associated with a postings ID. It returns
// ErrDocNotFound if there is no document corresponding to the given postings ID.
type DocRetriever interface {
- Doc(id postings.ID) (doc.Document, error)
+ Doc(id postings.ID) (doc.Metadata, error)
}
// IDDocIterator is an extended documents Iterator which can also return the postings
diff --git a/src/m3ninx/search/executor/executor_test.go b/src/m3ninx/search/executor/executor_test.go
index 25e611efc0..15d7c90f34 100644
--- a/src/m3ninx/search/executor/executor_test.go
+++ b/src/m3ninx/search/executor/executor_test.go
@@ -36,7 +36,7 @@ type testIterator struct{}
func newTestIterator() testIterator { return testIterator{} }
func (it testIterator) Next() bool { return false }
-func (it testIterator) Current() doc.Document { return doc.Document{} }
+func (it testIterator) Current() doc.Metadata { return doc.Metadata{} }
func (it testIterator) Err() error { return nil }
func (it testIterator) Close() error { return nil }
diff --git a/src/m3ninx/search/executor/iterator.go b/src/m3ninx/search/executor/iterator.go
index a95e95bb75..da310902e7 100644
--- a/src/m3ninx/search/executor/iterator.go
+++ b/src/m3ninx/search/executor/iterator.go
@@ -31,7 +31,7 @@ type iterator struct {
readers index.Readers
idx int
- currDoc doc.Document
+ currDoc doc.Metadata
currIter doc.Iterator
err error
@@ -91,7 +91,7 @@ func (it *iterator) Next() bool {
return true
}
-func (it *iterator) Current() doc.Document {
+func (it *iterator) Current() doc.Metadata {
return it.currDoc
}
diff --git a/src/m3ninx/search/executor/iterator_test.go b/src/m3ninx/search/executor/iterator_test.go
index ab682e02c1..1e56f4abd7 100644
--- a/src/m3ninx/search/executor/iterator_test.go
+++ b/src/m3ninx/search/executor/iterator_test.go
@@ -44,26 +44,26 @@ func TestIterator(t *testing.T) {
require.NoError(t, secondPL.Insert(67))
// Set up Readers.
- docs := []doc.Document{
- doc.Document{
+ docs := []doc.Metadata{
+ {
Fields: []doc.Field{
- doc.Field{
+ {
Name: []byte("apple"),
Value: []byte("red"),
},
},
},
- doc.Document{
+ {
Fields: []doc.Field{
- doc.Field{
+ {
Name: []byte("banana"),
Value: []byte("yellow"),
},
},
},
- doc.Document{
+ {
Fields: []doc.Field{
- doc.Field{
+ {
Name: []byte("carrot"),
Value: []byte("orange"),
},
diff --git a/src/m3ninx/search/proptest/issue865_test.go b/src/m3ninx/search/proptest/issue865_test.go
index e63e795dd3..3a257a50e3 100644
--- a/src/m3ninx/search/proptest/issue865_test.go
+++ b/src/m3ninx/search/proptest/issue865_test.go
@@ -42,7 +42,7 @@ import (
// NB(prateek): this test simulates the issues described in issue: https://github.com/m3db/m3/issues/865
var (
- doc1 = doc.Document{
+ doc1 = doc.Metadata{
ID: []byte("__name__=node_cpu_seconds_total,cpu=1,instance=m3db-node01:9100,job=node-exporter,mode=system,"),
Fields: []doc.Field{
doc.Field{[]byte("cpu"), []byte("1")},
@@ -52,7 +52,7 @@ var (
doc.Field{[]byte("mode"), []byte("system")},
},
}
- doc2 = doc.Document{
+ doc2 = doc.Metadata{
ID: []byte("__name__=node_memory_SwapTotal_bytes,instance=m3db-node01:9100,job=node-exporter,"),
Fields: []doc.Field{
doc.Field{[]byte("__name__"), []byte("node_memory_SwapTotal_bytes")},
@@ -60,7 +60,7 @@ var (
doc.Field{[]byte("job"), []byte("node-exporter")},
},
}
- doc3 = doc.Document{
+ doc3 = doc.Metadata{
ID: []byte("__name__=node_memory_SwapTotal_bytes,instance=alertmanager03:9100,job=node-exporter,"),
Fields: []doc.Field{
doc.Field{[]byte("__name__"), []byte("node_memory_SwapTotal_bytes")},
@@ -68,7 +68,7 @@ var (
doc.Field{[]byte("job"), []byte("node-exporter")},
},
}
- doc4 = doc.Document{
+ doc4 = doc.Metadata{
ID: []byte("__name__=node_memory_SwapTotal_bytes,instance=prometheus01:9100,job=node-exporter,"),
Fields: []doc.Field{
doc.Field{[]byte("__name__"), []byte("node_memory_SwapTotal_bytes")},
@@ -76,7 +76,7 @@ var (
doc.Field{[]byte("job"), []byte("node-exporter")},
},
}
- simpleTestDocs = []doc.Document{doc1, doc2, doc3, doc4}
+ simpleTestDocs = []doc.Metadata{doc1, doc2, doc3, doc4}
)
func TestAnyDistributionOfDocsDoesNotAffectQuery(t *testing.T) {
diff --git a/src/m3ninx/search/proptest/query_gen.go b/src/m3ninx/search/proptest/query_gen.go
index f7e6820345..ed3c06e13e 100644
--- a/src/m3ninx/search/proptest/query_gen.go
+++ b/src/m3ninx/search/proptest/query_gen.go
@@ -33,12 +33,12 @@ import (
)
// GenAllQuery generates an all query.
-func GenAllQuery(docs []doc.Document) gopter.Gen {
+func GenAllQuery(docs []doc.Metadata) gopter.Gen {
return gen.Const(query.NewAllQuery())
}
// GenFieldQuery generates a field query.
-func GenFieldQuery(docs []doc.Document) gopter.Gen {
+func GenFieldQuery(docs []doc.Metadata) gopter.Gen {
return func(genParams *gopter.GenParameters) *gopter.GenResult {
fieldName, _ := fieldNameAndValue(genParams, docs)
q := query.NewFieldQuery(fieldName)
@@ -47,7 +47,7 @@ func GenFieldQuery(docs []doc.Document) gopter.Gen {
}
// GenTermQuery generates a term query.
-func GenTermQuery(docs []doc.Document) gopter.Gen {
+func GenTermQuery(docs []doc.Metadata) gopter.Gen {
return func(genParams *gopter.GenParameters) *gopter.GenResult {
fieldName, fieldValue := fieldNameAndValue(genParams, docs)
q := query.NewTermQuery(fieldName, fieldValue)
@@ -55,7 +55,7 @@ func GenTermQuery(docs []doc.Document) gopter.Gen {
}
}
-func fieldNameAndValue(genParams *gopter.GenParameters, docs []doc.Document) ([]byte, []byte) {
+func fieldNameAndValue(genParams *gopter.GenParameters, docs []doc.Metadata) ([]byte, []byte) {
docIDRes, ok := gen.IntRange(0, len(docs)-1)(genParams).Retrieve()
if !ok {
panic("unable to generate term query") // should never happen
@@ -75,7 +75,7 @@ func fieldNameAndValue(genParams *gopter.GenParameters, docs []doc.Document) ([]
// GenIdenticalTermAndRegexpQuery generates a term query and regexp query with
// the exact same underlying field and pattern.
-func GenIdenticalTermAndRegexpQuery(docs []doc.Document) gopter.Gen {
+func GenIdenticalTermAndRegexpQuery(docs []doc.Metadata) gopter.Gen {
return func(genParams *gopter.GenParameters) *gopter.GenResult {
fieldName, fieldValue := fieldNameAndValue(genParams, docs)
termQ := query.NewTermQuery(fieldName, fieldValue)
@@ -88,7 +88,7 @@ func GenIdenticalTermAndRegexpQuery(docs []doc.Document) gopter.Gen {
}
// GenRegexpQuery generates a regexp query.
-func GenRegexpQuery(docs []doc.Document) gopter.Gen {
+func GenRegexpQuery(docs []doc.Metadata) gopter.Gen {
return func(genParams *gopter.GenParameters) *gopter.GenResult {
docIDRes, ok := gen.IntRange(0, len(docs)-1)(genParams).Retrieve()
if !ok {
@@ -137,7 +137,7 @@ func GenRegexpQuery(docs []doc.Document) gopter.Gen {
}
// GenNegationQuery generates a negation query.
-func GenNegationQuery(docs []doc.Document) gopter.Gen {
+func GenNegationQuery(docs []doc.Metadata) gopter.Gen {
return gen.OneGenOf(
GenFieldQuery(docs),
GenTermQuery(docs),
@@ -149,7 +149,7 @@ func GenNegationQuery(docs []doc.Document) gopter.Gen {
}
// GenConjunctionQuery generates a conjunction query.
-func GenConjunctionQuery(docs []doc.Document) gopter.Gen {
+func GenConjunctionQuery(docs []doc.Metadata) gopter.Gen {
return gen.SliceOf(
gen.OneGenOf(
GenFieldQuery(docs),
@@ -163,7 +163,7 @@ func GenConjunctionQuery(docs []doc.Document) gopter.Gen {
}
// GenDisjunctionQuery generates a disjunction query.
-func GenDisjunctionQuery(docs []doc.Document) gopter.Gen {
+func GenDisjunctionQuery(docs []doc.Metadata) gopter.Gen {
return gen.SliceOf(
gen.OneGenOf(
GenFieldQuery(docs),
@@ -177,7 +177,7 @@ func GenDisjunctionQuery(docs []doc.Document) gopter.Gen {
}
// GenQuery generates a query.
-func GenQuery(docs []doc.Document) gopter.Gen {
+func GenQuery(docs []doc.Metadata) gopter.Gen {
return gen.OneGenOf(
GenAllQuery(docs),
GenFieldQuery(docs),
diff --git a/src/m3ninx/search/proptest/segment_gen.go b/src/m3ninx/search/proptest/segment_gen.go
index dbc6ad9e43..41ef29daa7 100644
--- a/src/m3ninx/search/proptest/segment_gen.go
+++ b/src/m3ninx/search/proptest/segment_gen.go
@@ -40,8 +40,8 @@ var (
fstOptions = fst.NewOptions()
)
-func collectDocs(iter doc.Iterator) ([]doc.Document, error) {
- var docs []doc.Document
+func collectDocs(iter doc.Iterator) ([]doc.Metadata, error) {
+ var docs []doc.Metadata
for iter.Next() {
docs = append(docs, iter.Current())
}
@@ -53,7 +53,7 @@ func collectDocs(iter doc.Iterator) ([]doc.Document, error) {
return docs, nil
}
-func newTestMemSegment(t *testing.T, docs []doc.Document) segment.MutableSegment {
+func newTestMemSegment(t *testing.T, docs []doc.Metadata) segment.MutableSegment {
opts := mem.NewOptions()
s, err := mem.NewSegment(opts)
require.NoError(t, err)
@@ -64,7 +64,7 @@ func newTestMemSegment(t *testing.T, docs []doc.Document) segment.MutableSegment
return s
}
-func (i propTestInput) generate(t *testing.T, docs []doc.Document) []segment.Segment {
+func (i propTestInput) generate(t *testing.T, docs []doc.Metadata) []segment.Segment {
var result []segment.Segment
for j := 0; j < len(i.segments); j++ {
s, err := mem.NewSegment(memOptions)
diff --git a/src/m3ninx/search/proptest/util.go b/src/m3ninx/search/proptest/util.go
index b4c35706ab..2e2fd9d71c 100644
--- a/src/m3ninx/search/proptest/util.go
+++ b/src/m3ninx/search/proptest/util.go
@@ -27,11 +27,11 @@ import (
)
type documentIteratorMatcher struct {
- expectedDocs map[string]doc.Document
+ expectedDocs map[string]doc.Metadata
}
-func newDocumentIteratorMatcher(docs ...doc.Document) (*documentIteratorMatcher, error) {
- docMap := make(map[string]doc.Document, len(docs))
+func newDocumentIteratorMatcher(docs ...doc.Metadata) (*documentIteratorMatcher, error) {
+ docMap := make(map[string]doc.Metadata, len(docs))
for _, d := range docs {
id := string(d.ID)
if _, ok := docMap[id]; ok {
@@ -44,7 +44,7 @@ func newDocumentIteratorMatcher(docs ...doc.Document) (*documentIteratorMatcher,
// Matches returns whether the provided iterator matches the collection of provided docs.
func (m *documentIteratorMatcher) Matches(i doc.Iterator) error {
- pendingDocIDs := make(map[string]doc.Document, len(m.expectedDocs))
+ pendingDocIDs := make(map[string]doc.Metadata, len(m.expectedDocs))
for id := range m.expectedDocs {
pendingDocIDs[id] = m.expectedDocs[id]
}
diff --git a/src/m3ninx/util/docs.go b/src/m3ninx/util/docs.go
index 1be1a1fcdb..c80b60bee9 100644
--- a/src/m3ninx/util/docs.go
+++ b/src/m3ninx/util/docs.go
@@ -31,7 +31,7 @@ import (
// ReadDocs reads up to n documents from a JSON formatted file at the provided path.
// It is useful for getting a set of documents to run tests with.
-func ReadDocs(path string, n int) ([]doc.Document, error) {
+func ReadDocs(path string, n int) ([]doc.Metadata, error) {
f, err := os.Open(path)
if err != nil {
return nil, err
@@ -39,7 +39,7 @@ func ReadDocs(path string, n int) ([]doc.Document, error) {
defer f.Close()
var (
- docs []doc.Document
+ docs []doc.Metadata
scanner = bufio.NewScanner(f)
)
for scanner.Scan() && len(docs) < n {
@@ -64,7 +64,7 @@ func ReadDocs(path string, n int) ([]doc.Document, error) {
Value: []byte(v),
})
}
- docs = append(docs, doc.Document{
+ docs = append(docs, doc.Metadata{
ID: id,
Fields: fields,
})
@@ -78,7 +78,7 @@ func ReadDocs(path string, n int) ([]doc.Document, error) {
}
// MustReadDocs calls ReadDocs and panics if there is an error.
-func MustReadDocs(path string, n int) []doc.Document {
+func MustReadDocs(path string, n int) []doc.Metadata {
docs, err := ReadDocs(path, n)
if err != nil {
panic(err)
diff --git a/src/metrics/encoding/protobuf/aggregated_encoder.go b/src/metrics/encoding/protobuf/aggregated_encoder.go
index c27c60226b..842e68664d 100644
--- a/src/metrics/encoding/protobuf/aggregated_encoder.go
+++ b/src/metrics/encoding/protobuf/aggregated_encoder.go
@@ -68,5 +68,9 @@ func (enc *aggregatedEncoder) Encode(
}
func (enc *aggregatedEncoder) Buffer() Buffer {
- return NewBuffer(enc.buf, enc.pool)
+ var fn PoolReleaseFn
+ if enc.pool != nil {
+ fn = enc.pool.Put
+ }
+ return NewBuffer(enc.buf, fn)
}
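
Note: the nil guard above is load-bearing. Buffer now takes a release callback instead of the pool itself (see buffer.go below), and enc.pool.Put cannot be taken unconditionally: evaluating a method value on a nil interface panics immediately, not at call time. A sketch of the hazard being avoided:

    var p pool.BytesPool // nil interface value
    fn := p.Put          // panics here, before fn is ever invoked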
diff --git a/src/metrics/encoding/protobuf/buffer.go b/src/metrics/encoding/protobuf/buffer.go
index f6e08eb3fc..94b1f624a8 100644
--- a/src/metrics/encoding/protobuf/buffer.go
+++ b/src/metrics/encoding/protobuf/buffer.go
@@ -26,16 +26,18 @@ import (
"github.com/m3db/m3/src/x/pool"
)
+// PoolReleaseFn is a function used to release the underlying slice back to a bytes pool.
+type PoolReleaseFn func([]byte)
+
// Buffer contains a byte slice backed by an optional bytes pool.
type Buffer struct {
- buf []byte
- pool pool.BytesPool
- closed bool
+ buf []byte
+ finalizer PoolReleaseFn
}
// NewBuffer create a new buffer.
-func NewBuffer(buf []byte, p pool.BytesPool) Buffer {
- return Buffer{buf: buf, pool: p}
+func NewBuffer(buf []byte, p PoolReleaseFn) Buffer {
+ return Buffer{buf: buf, finalizer: p}
}
// Bytes returns the raw byte slice.
@@ -46,15 +48,11 @@ func (b *Buffer) Truncate(n int) { b.buf = b.buf[:n] }
// Close closes the buffer.
func (b *Buffer) Close() {
- if b.closed {
- return
- }
- b.closed = true
- if b.pool != nil && b.buf != nil {
- b.pool.Put(b.buf)
+ if b.finalizer != nil && b.buf != nil {
+ b.finalizer(b.buf)
}
- b.pool = nil
b.buf = nil
+ b.finalizer = nil
}
type copyDataMode int
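
Note: Buffer is now decoupled from pool.BytesPool; it holds only a PoolReleaseFn, and Close stays idempotent without a separate closed flag because both buf and finalizer are nilled on the first call. A minimal usage sketch, assuming p is an initialized pool.BytesPool (any func([]byte) works as the finalizer):

    data := p.Get(16)[:16]
    buf := NewBuffer(data, p.Put) // or NewBuffer(data, nil) for unpooled bytes
    defer buf.Close()             // releases data back to p once; later Closes are no-ops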
diff --git a/src/metrics/encoding/protobuf/buffer_test.go b/src/metrics/encoding/protobuf/buffer_test.go
index 628a6e604f..df6552ef20 100644
--- a/src/metrics/encoding/protobuf/buffer_test.go
+++ b/src/metrics/encoding/protobuf/buffer_test.go
@@ -37,13 +37,11 @@ func TestBufferWithPool(t *testing.T) {
data := p.Get(16)[:16]
data[0] = 0xff
- buf := NewBuffer(data, p)
- require.Equal(t, data, buf.Bytes())
- require.False(t, buf.closed)
+ buf := NewBuffer(data, p.Put)
+ require.NotNil(t, buf.buf)
buf.Close()
- require.True(t, buf.closed)
- require.Nil(t, buf.pool)
+ require.Nil(t, buf.finalizer)
require.Nil(t, buf.buf)
// Verify that closing the buffer returns the buffer to pool.
@@ -60,11 +58,10 @@ func TestBufferNilPool(t *testing.T) {
data := make([]byte, 16)
buf := NewBuffer(data, nil)
require.Equal(t, data, buf.Bytes())
- require.False(t, buf.closed)
+ require.NotNil(t, buf.buf)
buf.Close()
- require.True(t, buf.closed)
- require.Nil(t, buf.pool)
+ require.Nil(t, buf.finalizer)
require.Nil(t, buf.buf)
}
diff --git a/src/metrics/encoding/protobuf/unaggregated_encoder.go b/src/metrics/encoding/protobuf/unaggregated_encoder.go
index cee6855a4b..d470468360 100644
--- a/src/metrics/encoding/protobuf/unaggregated_encoder.go
+++ b/src/metrics/encoding/protobuf/unaggregated_encoder.go
@@ -111,7 +111,7 @@ func (enc *unaggregatedEncoder) Truncate(n int) error {
}
func (enc *unaggregatedEncoder) Relinquish() Buffer {
- res := NewBuffer(enc.buf[:enc.used], enc.pool)
+ res := NewBuffer(enc.buf[:enc.used], enc.pool.Put)
enc.buf = nil
enc.used = 0
return res
diff --git a/src/metrics/encoding/protobuf/unaggregated_iterator.go b/src/metrics/encoding/protobuf/unaggregated_iterator.go
index 81d9038f76..d5f8a567f5 100644
--- a/src/metrics/encoding/protobuf/unaggregated_iterator.go
+++ b/src/metrics/encoding/protobuf/unaggregated_iterator.go
@@ -102,6 +102,10 @@ func (it *unaggregatedIterator) Next() bool {
it.err = fmt.Errorf("decoded message size %d is larger than supported max message size %d", size, it.maxMessageSize)
return false
}
+ if size <= 0 {
+ it.err = fmt.Errorf("decoded message size %d is zero or negative", size)
+ return false
+ }
it.ensureBufferSize(size)
if err := it.decodeMessage(size); err != nil {
return false
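
Note: this guard complements the existing maxMessageSize check. Messages are varint-size-prefixed on the wire, so a zero or negative prefix (e.g. from a corrupt or zeroed stream) must fail fast rather than leave the iterator reading empty messages forever. A sketch of the framing the iterator expects, assuming msg is a marshalled payload and w an io.Writer:

    var sizeBuf [binary.MaxVarintLen64]byte
    n := binary.PutVarint(sizeBuf[:], int64(len(msg)))
    w.Write(sizeBuf[:n]) // size prefix
    w.Write(msg)         // message body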
diff --git a/src/metrics/encoding/protobuf/unaggregated_iterator_test.go b/src/metrics/encoding/protobuf/unaggregated_iterator_test.go
index 523e923b19..4aa96d7785 100644
--- a/src/metrics/encoding/protobuf/unaggregated_iterator_test.go
+++ b/src/metrics/encoding/protobuf/unaggregated_iterator_test.go
@@ -22,6 +22,7 @@ package protobuf
import (
"bytes"
+ "encoding/binary"
"io"
"strings"
"testing"
@@ -103,7 +104,7 @@ func TestUnaggregatedIteratorDecodeBatchTimerWithMetadatas(t *testing.T) {
enc := NewUnaggregatedEncoder(NewUnaggregatedOptions())
for _, input := range inputs {
require.NoError(t, enc.EncodeMessage(encoding.UnaggregatedMessageUnion{
- Type: encoding.BatchTimerWithMetadatasType,
+ Type: encoding.BatchTimerWithMetadatasType,
BatchTimerWithMetadatas: input,
}))
}
@@ -195,7 +196,7 @@ func TestUnaggregatedIteratorDecodeForwardedMetricWithMetadata(t *testing.T) {
enc := NewUnaggregatedEncoder(NewUnaggregatedOptions())
for _, input := range inputs {
require.NoError(t, enc.EncodeMessage(encoding.UnaggregatedMessageUnion{
- Type: encoding.ForwardedMetricWithMetadataType,
+ Type: encoding.ForwardedMetricWithMetadataType,
ForwardedMetricWithMetadata: input,
}))
}
@@ -286,7 +287,7 @@ func TestUnaggregatedIteratorDecodeTimedMetricWithMetadata(t *testing.T) {
enc := NewUnaggregatedEncoder(NewUnaggregatedOptions())
for _, input := range inputs {
require.NoError(t, enc.EncodeMessage(encoding.UnaggregatedMessageUnion{
- Type: encoding.TimedMetricWithMetadataType,
+ Type: encoding.TimedMetricWithMetadataType,
TimedMetricWithMetadata: input,
}))
}
@@ -406,7 +407,7 @@ func TestUnaggregatedIteratorDecodeStress(t *testing.T) {
}
case unaggregated.BatchTimerWithMetadatas:
msg = encoding.UnaggregatedMessageUnion{
- Type: encoding.BatchTimerWithMetadatasType,
+ Type: encoding.BatchTimerWithMetadatasType,
BatchTimerWithMetadatas: input,
}
case unaggregated.GaugeWithMetadatas:
@@ -416,17 +417,17 @@ func TestUnaggregatedIteratorDecodeStress(t *testing.T) {
}
case aggregated.ForwardedMetricWithMetadata:
msg = encoding.UnaggregatedMessageUnion{
- Type: encoding.ForwardedMetricWithMetadataType,
+ Type: encoding.ForwardedMetricWithMetadataType,
ForwardedMetricWithMetadata: input,
}
case aggregated.TimedMetricWithMetadata:
msg = encoding.UnaggregatedMessageUnion{
- Type: encoding.TimedMetricWithMetadataType,
+ Type: encoding.TimedMetricWithMetadataType,
TimedMetricWithMetadata: input,
}
case aggregated.PassthroughMetricWithMetadata:
msg = encoding.UnaggregatedMessageUnion{
- Type: encoding.PassthroughMetricWithMetadataType,
+ Type: encoding.PassthroughMetricWithMetadataType,
PassthroughMetricWithMetadata: input,
}
default:
@@ -554,3 +555,20 @@ func TestUnaggregatedIteratorNextOnClose(t *testing.T) {
// Verify that closing a second time is a no op.
it.Close()
}
+
+func TestUnaggregatedIteratorNextOnInvalid(t *testing.T) {
+ buf := make([]byte, 32)
+ binary.PutVarint(buf, 0)
+ stream := bytes.NewReader(buf)
+
+ it := NewUnaggregatedIterator(stream, NewUnaggregatedOptions())
+ require.False(t, it.Next())
+ require.False(t, it.Next())
+
+ buf = make([]byte, 32)
+ binary.PutVarint(buf, -1234)
+ stream = bytes.NewReader(buf)
+ it = NewUnaggregatedIterator(stream, NewUnaggregatedOptions())
+ require.False(t, it.Next())
+ require.False(t, it.Next())
+}
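
Note the doubled require.False in each case above: once the iterator records a decode error, the error is sticky and every later Next returns false without consuming more input. Consumers keep the usual loop shape (assuming Err surfaces it.err, as elsewhere in this package):

    for it.Next() {
        _ = it.Current() // process the decoded message
    }
    if err := it.Err(); err != nil {
        // e.g. "decoded message size -1234 is zero or negative"
    }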
diff --git a/src/metrics/generated/mocks/generate.go b/src/metrics/generated/mocks/generate.go
index 4574464705..25b6582530 100644
--- a/src/metrics/generated/mocks/generate.go
+++ b/src/metrics/generated/mocks/generate.go
@@ -19,7 +19,7 @@
// THE SOFTWARE.
// mockgen rules for generating mocks for exported interfaces (reflection mode).
-//go:generate sh -c "mockgen -package=id github.com/m3db/m3/src/metrics/metric/id ID | genclean -pkg github.com/m3db/m3/src/metrics/metric/id -out $GOPATH/src/github.com/m3db/m3/src/metrics/metric/id/id_mock.go"
+//go:generate sh -c "mockgen -package=id github.com/m3db/m3/src/metrics/metric/id ID,SortedTagIterator | genclean -pkg github.com/m3db/m3/src/metrics/metric/id -out $GOPATH/src/github.com/m3db/m3/src/metrics/metric/id/id_mock.go"
//go:generate sh -c "mockgen -package=matcher github.com/m3db/m3/src/metrics/matcher Matcher | genclean -pkg github.com/m3db/m3/src/metrics/matcher -out $GOPATH/src/github.com/m3db/m3/src/metrics/matcher/matcher_mock.go"
//go:generate sh -c "mockgen -package=protobuf github.com/m3db/m3/src/metrics/encoding/protobuf UnaggregatedEncoder | genclean -pkg github.com/m3db/m3/src/metrics/encoding/protobuf -out $GOPATH/src/github.com/m3db/m3/src/metrics/encoding/protobuf/protobuf_mock.go"
//go:generate sh -c "mockgen -package=rules github.com/m3db/m3/src/metrics/rules Store | genclean -pkg github.com/m3db/m3/src/metrics/rules -out $GOPATH/src/github.com/m3db/m3/src/metrics/rules/rules_mock.go"
diff --git a/src/metrics/matcher/match.go b/src/metrics/matcher/match.go
index b911e823ec..776428feb2 100644
--- a/src/metrics/matcher/match.go
+++ b/src/metrics/matcher/match.go
@@ -37,53 +37,95 @@ type Matcher interface {
}
type matcher struct {
- opts Options
+ namespaceResolver namespaceResolver
+ namespaces Namespaces
+ cache cache.Cache
+}
+
+type namespaceResolver struct {
namespaceTag []byte
defaultNamespace []byte
+}
- namespaces Namespaces
- cache cache.Cache
+func (r namespaceResolver) Resolve(id id.ID) []byte {
+ ns, found := id.TagValue(r.namespaceTag)
+ if !found {
+ ns = r.defaultNamespace
+ }
+ return ns
}
-// NewMatcher creates a new rule matcher.
+// NewMatcher creates a new rule matcher, optionally with a cache.
func NewMatcher(cache cache.Cache, opts Options) (Matcher, error) {
+ nsResolver := namespaceResolver{
+ namespaceTag: opts.NamespaceTag(),
+ defaultNamespace: opts.DefaultNamespace(),
+ }
+
instrumentOpts := opts.InstrumentOptions()
scope := instrumentOpts.MetricsScope()
iOpts := instrumentOpts.SetMetricsScope(scope.SubScope("namespaces"))
- namespacesOpts := opts.SetInstrumentOptions(iOpts).
- SetOnNamespaceAddedFn(func(namespace []byte, ruleSet RuleSet) {
- cache.Register(namespace, ruleSet)
- }).
- SetOnNamespaceRemovedFn(func(namespace []byte) {
- cache.Unregister(namespace)
- }).
- SetOnRuleSetUpdatedFn(func(namespace []byte, ruleSet RuleSet) {
- cache.Refresh(namespace, ruleSet)
- })
- key := opts.NamespacesKey()
- namespaces := NewNamespaces(key, namespacesOpts)
+ namespacesOpts := opts.SetInstrumentOptions(iOpts)
+
+ if cache != nil {
+ namespacesOpts = namespacesOpts.
+ SetOnNamespaceAddedFn(func(namespace []byte, ruleSet RuleSet) {
+ cache.Register(namespace, ruleSet)
+ }).
+ SetOnNamespaceRemovedFn(func(namespace []byte) {
+ cache.Unregister(namespace)
+ }).
+ SetOnRuleSetUpdatedFn(func(namespace []byte, ruleSet RuleSet) {
+ cache.Refresh(namespace, ruleSet)
+ })
+ }
+
+ namespaces := NewNamespaces(opts.NamespacesKey(), namespacesOpts)
if err := namespaces.Open(); err != nil {
return nil, err
}
+ if cache == nil {
+ return &noCacheMatcher{
+ namespaceResolver: nsResolver,
+ namespaces: namespaces,
+ }, nil
+ }
+
return &matcher{
- opts: opts,
- namespaceTag: opts.NamespaceTag(),
- defaultNamespace: opts.DefaultNamespace(),
- namespaces: namespaces,
- cache: cache,
+ namespaceResolver: nsResolver,
+ namespaces: namespaces,
+ cache: cache,
}, nil
}
-func (m *matcher) ForwardMatch(id id.ID, fromNanos, toNanos int64) rules.MatchResult {
- ns, found := id.TagValue(m.namespaceTag)
- if !found {
- ns = m.defaultNamespace
- }
- return m.cache.ForwardMatch(ns, id.Bytes(), fromNanos, toNanos)
+func (m *matcher) ForwardMatch(
+ id id.ID,
+ fromNanos, toNanos int64,
+) rules.MatchResult {
+ return m.cache.ForwardMatch(m.namespaceResolver.Resolve(id),
+ id.Bytes(), fromNanos, toNanos)
}
func (m *matcher) Close() error {
m.namespaces.Close()
return m.cache.Close()
}
+
+type noCacheMatcher struct {
+ namespaces Namespaces
+ namespaceResolver namespaceResolver
+}
+
+func (m *noCacheMatcher) ForwardMatch(
+ id id.ID,
+ fromNanos, toNanos int64,
+) rules.MatchResult {
+ return m.namespaces.ForwardMatch(m.namespaceResolver.Resolve(id),
+ id.Bytes(), fromNanos, toNanos)
+}
+
+func (m *noCacheMatcher) Close() error {
+ m.namespaces.Close()
+ return nil
+}
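
Note: NewMatcher now degrades gracefully without a cache. The namespace added/removed/updated hooks are wired only when a cache is present, and a nil cache yields a noCacheMatcher that resolves the namespace tag and matches against Namespaces directly. A minimal sketch of the cache-less path, with matcherOpts built as in testMatcher in the test diff below:

    m, err := NewMatcher(nil, matcherOpts) // nil cache: match straight off the rule sets
    if err != nil {
        return err
    }
    defer m.Close() // closes namespaces; there is no cache to close
    res := m.ForwardMatch(metricID, fromNanos, toNanos)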
diff --git a/src/metrics/matcher/match_test.go b/src/metrics/matcher/match_test.go
index f5dc350286..cce42bab56 100644
--- a/src/metrics/matcher/match_test.go
+++ b/src/metrics/matcher/match_test.go
@@ -27,11 +27,20 @@ import (
"github.com/m3db/m3/src/cluster/kv"
"github.com/m3db/m3/src/cluster/kv/mem"
+ "github.com/m3db/m3/src/metrics/aggregation"
+ "github.com/m3db/m3/src/metrics/filters"
+ "github.com/m3db/m3/src/metrics/generated/proto/aggregationpb"
+ "github.com/m3db/m3/src/metrics/generated/proto/policypb"
"github.com/m3db/m3/src/metrics/generated/proto/rulepb"
"github.com/m3db/m3/src/metrics/matcher/cache"
+ "github.com/m3db/m3/src/metrics/metadata"
+ "github.com/m3db/m3/src/metrics/metric/id"
+ "github.com/m3db/m3/src/metrics/policy"
"github.com/m3db/m3/src/metrics/rules"
+ "github.com/m3db/m3/src/query/models"
"github.com/m3db/m3/src/x/clock"
"github.com/m3db/m3/src/x/instrument"
+ xtest "github.com/m3db/m3/src/x/test"
"github.com/m3db/m3/src/x/watch"
"github.com/golang/mock/gomock"
@@ -73,7 +82,9 @@ func TestMatcherMatchDoesNotExist(t *testing.T) {
tagValueFn: func(tagName []byte) ([]byte, bool) { return nil, false },
}
now := time.Now()
- matcher := testMatcher(t, newMemCache())
+ matcher := testMatcher(t, testMatcherOptions{
+ cache: newMemCache(),
+ })
require.Equal(t, rules.EmptyMatchResult, matcher.ForwardMatch(id, now.UnixNano(), now.UnixNano()))
}
@@ -89,37 +100,152 @@ func TestMatcherMatchExists(t *testing.T) {
memRes = memResults{results: map[string]rules.MatchResult{"foo": res}}
)
cache := newMemCache()
- matcher := testMatcher(t, cache)
+ matcher := testMatcher(t, testMatcherOptions{
+ cache: cache,
+ })
c := cache.(*memCache)
c.namespaces[ns] = memRes
require.Equal(t, res, matcher.ForwardMatch(id, now.UnixNano(), now.UnixNano()))
}
+func TestMatcherMatchExistsNoCache(t *testing.T) {
+ ctrl := xtest.NewController(t)
+ defer ctrl.Finish()
+
+ var (
+ ns = "fooNs"
+ metric = &testMetricID{
+ id: []byte("foo"),
+ tagValueFn: func(tagName []byte) ([]byte, bool) {
+ if string(tagName) == "fooTag" {
+ return []byte("fooValue"), true
+ }
+ return []byte(ns), true
+ },
+ }
+ now = time.Now()
+ )
+ matcher := testMatcher(t, testMatcherOptions{
+ tagFilterOptions: filters.TagsFilterOptions{
+ NameAndTagsFn: func(id []byte) (name []byte, tags []byte, err error) {
+ name = metric.id
+ return
+ },
+ SortedTagIteratorFn: func(tagPairs []byte) id.SortedTagIterator {
+ iter := id.NewMockSortedTagIterator(ctrl)
+ iter.EXPECT().Next().Return(true)
+ iter.EXPECT().Current().Return([]byte("fooTag"), []byte("fooValue"))
+ iter.EXPECT().Next().Return(false)
+ iter.EXPECT().Err().Return(nil)
+ iter.EXPECT().Close()
+ return iter
+ },
+ },
+ storeSetup: func(t *testing.T, store kv.TxnStore) {
+ _, err := store.Set(testNamespacesKey, &rulepb.Namespaces{
+ Namespaces: []*rulepb.Namespace{
+ {
+ Name: ns,
+ Snapshots: []*rulepb.NamespaceSnapshot{
+ {
+ ForRulesetVersion: 1,
+ Tombstoned: false,
+ },
+ },
+ },
+ },
+ })
+ require.NoError(t, err)
+
+ _, err = store.Set("/ruleset/fooNs", &rulepb.RuleSet{
+ Namespace: ns,
+ MappingRules: []*rulepb.MappingRule{
+ {
+ Snapshots: []*rulepb.MappingRuleSnapshot{
+ {
+ Filter: "fooTag:fooValue",
+ AggregationTypes: []aggregationpb.AggregationType{
+ aggregationpb.AggregationType_LAST,
+ },
+ StoragePolicies: []*policypb.StoragePolicy{
+ {
+ Resolution: policypb.Resolution{
+ WindowSize: int64(time.Minute),
+ Precision: int64(time.Minute),
+ },
+ Retention: policypb.Retention{
+ Period: 24 * int64(time.Hour),
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ })
+ require.NoError(t, err)
+ },
+ })
+
+ forExistingID := metadata.StagedMetadatas{
+ metadata.StagedMetadata{
+ Metadata: metadata.Metadata{
+ Pipelines: metadata.PipelineMetadatas{
+ metadata.PipelineMetadata{
+ AggregationID: aggregation.MustCompressTypes(aggregation.Last),
+ StoragePolicies: policy.StoragePolicies{
+ policy.MustParseStoragePolicy("1m:1d"),
+ },
+ Tags: []models.Tag{},
+ },
+ },
+ },
+ },
+ }
+ forNewRollupIDs := []rules.IDWithMetadatas{}
+ keepOriginal := false
+ expected := rules.NewMatchResult(1, math.MaxInt64,
+ forExistingID, forNewRollupIDs, keepOriginal)
+
+ result := matcher.ForwardMatch(metric, now.UnixNano(), now.UnixNano())
+
+ require.Equal(t, expected, result)
+}
+
func TestMatcherClose(t *testing.T) {
- matcher := testMatcher(t, newMemCache())
+ matcher := testMatcher(t, testMatcherOptions{
+ cache: newMemCache(),
+ })
require.NoError(t, matcher.Close())
}
-func testMatcher(t *testing.T, cache cache.Cache) Matcher {
+type testMatcherOptions struct {
+ cache cache.Cache
+ storeSetup func(*testing.T, kv.TxnStore)
+ tagFilterOptions filters.TagsFilterOptions
+}
+
+func testMatcher(t *testing.T, opts testMatcherOptions) Matcher {
var (
- store = mem.NewStore()
- opts = NewOptions().
- SetClockOptions(clock.NewOptions()).
- SetInstrumentOptions(instrument.NewOptions()).
- SetInitWatchTimeout(100 * time.Millisecond).
- SetKVStore(store).
- SetNamespacesKey(testNamespacesKey).
- SetNamespaceTag([]byte("namespace")).
- SetDefaultNamespace([]byte("default")).
- SetRuleSetKeyFn(defaultRuleSetKeyFn).
- SetRuleSetOptions(rules.NewOptions()).
- SetMatchRangePast(0)
+ store = mem.NewStore()
+ matcherOpts = NewOptions().
+ SetClockOptions(clock.NewOptions()).
+ SetInstrumentOptions(instrument.NewOptions()).
+ SetInitWatchTimeout(100 * time.Millisecond).
+ SetKVStore(store).
+ SetNamespacesKey(testNamespacesKey).
+ SetNamespaceTag([]byte("namespace")).
+ SetDefaultNamespace([]byte("default")).
+ SetRuleSetKeyFn(defaultRuleSetKeyFn).
+ SetRuleSetOptions(rules.NewOptions().
+ SetTagsFilterOptions(opts.tagFilterOptions)).
+ SetMatchRangePast(0)
proto = &rulepb.Namespaces{
Namespaces: []*rulepb.Namespace{
- &rulepb.Namespace{
+ {
Name: "fooNs",
Snapshots: []*rulepb.NamespaceSnapshot{
- &rulepb.NamespaceSnapshot{
+ {
ForRulesetVersion: 1,
Tombstoned: true,
},
@@ -128,10 +254,15 @@ func testMatcher(t *testing.T, cache cache.Cache) Matcher {
},
}
)
+
_, err := store.SetIfNotExists(testNamespacesKey, proto)
require.NoError(t, err)
- m, err := NewMatcher(cache, opts)
+ if fn := opts.storeSetup; fn != nil {
+ fn(t, store)
+ }
+
+ m, err := NewMatcher(opts.cache, matcherOpts)
require.NoError(t, err)
return m
}
diff --git a/src/metrics/metadata/metadata.go b/src/metrics/metadata/metadata.go
index 8898930153..8c0dc2dbd8 100644
--- a/src/metrics/metadata/metadata.go
+++ b/src/metrics/metadata/metadata.go
@@ -106,6 +106,12 @@ func (m PipelineMetadata) IsDefault() bool {
m.DropPolicy.IsDefault()
}
+// IsMappingRule returns whether this pipeline metadata was produced by a
+// mapping rule, i.e. its pipeline carries no operations.
+// nolint:gocritic
+func (m PipelineMetadata) IsMappingRule() bool {
+ return m.Pipeline.IsEmpty()
+}
+
// IsDropPolicyApplied returns whether this is the default standard pipeline
// but with the drop policy applied.
func (m PipelineMetadata) IsDropPolicyApplied() bool {
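
Note: IsMappingRule keys off the pipeline being empty; mapping rules produce pipeline metadata with no operations, whereas rollup rules always carry pipeline steps. A short usage sketch over a Metadata value (field names as in this file):

    for _, pm := range meta.Pipelines {
        if pm.IsMappingRule() {
            // no pipeline ops: this metadata came from a mapping rule
        }
    }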
diff --git a/src/metrics/metric/id/id_mock.go b/src/metrics/metric/id/id_mock.go
index a2407de492..bbba1fdf27 100644
--- a/src/metrics/metric/id/id_mock.go
+++ b/src/metrics/metric/id/id_mock.go
@@ -1,7 +1,7 @@
// Code generated by MockGen. DO NOT EDIT.
-// Source: github.com/m3db/m3/src/metrics/metric/id (interfaces: ID)
+// Source: github.com/m3db/m3/src/metrics/metric/id (interfaces: ID,SortedTagIterator)
-// Copyright (c) 2018 Uber Technologies, Inc.
+// Copyright (c) 2021 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -81,3 +81,93 @@ func (mr *MockIDMockRecorder) TagValue(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TagValue", reflect.TypeOf((*MockID)(nil).TagValue), arg0)
}
+
+// MockSortedTagIterator is a mock of SortedTagIterator interface
+type MockSortedTagIterator struct {
+ ctrl *gomock.Controller
+ recorder *MockSortedTagIteratorMockRecorder
+}
+
+// MockSortedTagIteratorMockRecorder is the mock recorder for MockSortedTagIterator
+type MockSortedTagIteratorMockRecorder struct {
+ mock *MockSortedTagIterator
+}
+
+// NewMockSortedTagIterator creates a new mock instance
+func NewMockSortedTagIterator(ctrl *gomock.Controller) *MockSortedTagIterator {
+ mock := &MockSortedTagIterator{ctrl: ctrl}
+ mock.recorder = &MockSortedTagIteratorMockRecorder{mock}
+ return mock
+}
+
+// EXPECT returns an object that allows the caller to indicate expected use
+func (m *MockSortedTagIterator) EXPECT() *MockSortedTagIteratorMockRecorder {
+ return m.recorder
+}
+
+// Close mocks base method
+func (m *MockSortedTagIterator) Close() {
+ m.ctrl.T.Helper()
+ m.ctrl.Call(m, "Close")
+}
+
+// Close indicates an expected call of Close
+func (mr *MockSortedTagIteratorMockRecorder) Close() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockSortedTagIterator)(nil).Close))
+}
+
+// Current mocks base method
+func (m *MockSortedTagIterator) Current() ([]byte, []byte) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Current")
+ ret0, _ := ret[0].([]byte)
+ ret1, _ := ret[1].([]byte)
+ return ret0, ret1
+}
+
+// Current indicates an expected call of Current
+func (mr *MockSortedTagIteratorMockRecorder) Current() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Current", reflect.TypeOf((*MockSortedTagIterator)(nil).Current))
+}
+
+// Err mocks base method
+func (m *MockSortedTagIterator) Err() error {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Err")
+ ret0, _ := ret[0].(error)
+ return ret0
+}
+
+// Err indicates an expected call of Err
+func (mr *MockSortedTagIteratorMockRecorder) Err() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Err", reflect.TypeOf((*MockSortedTagIterator)(nil).Err))
+}
+
+// Next mocks base method
+func (m *MockSortedTagIterator) Next() bool {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "Next")
+ ret0, _ := ret[0].(bool)
+ return ret0
+}
+
+// Next indicates an expected call of Next
+func (mr *MockSortedTagIteratorMockRecorder) Next() *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Next", reflect.TypeOf((*MockSortedTagIterator)(nil).Next))
+}
+
+// Reset mocks base method
+func (m *MockSortedTagIterator) Reset(arg0 []byte) {
+ m.ctrl.T.Helper()
+ m.ctrl.Call(m, "Reset", arg0)
+}
+
+// Reset indicates an expected call of Reset
+func (mr *MockSortedTagIteratorMockRecorder) Reset(arg0 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reset", reflect.TypeOf((*MockSortedTagIterator)(nil).Reset), arg0)
+}
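
A minimal sketch of how the newly generated `MockSortedTagIterator` might be driven from a test. The tag pair, test name, and `id_test` package layout are assumptions; only the mock's API comes from the generated code above.

```go
package id_test

import (
	"fmt"
	"testing"

	"github.com/golang/mock/gomock"
	"github.com/stretchr/testify/require"

	"github.com/m3db/m3/src/metrics/metric/id"
)

func TestSortedTagIteratorMock(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	iter := id.NewMockSortedTagIterator(ctrl)
	// Script a single iteration yielding one tag pair, then exhaustion.
	gomock.InOrder(
		iter.EXPECT().Next().Return(true),
		iter.EXPECT().Current().Return([]byte("city"), []byte("nyc")),
		iter.EXPECT().Next().Return(false),
	)
	iter.EXPECT().Err().Return(nil)
	iter.EXPECT().Close()

	for iter.Next() {
		name, value := iter.Current()
		fmt.Printf("%s=%s\n", name, value)
	}
	require.NoError(t, iter.Err())
	iter.Close()
}
```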
diff --git a/src/query/api/v1/handler/database/create_test.go b/src/query/api/v1/handler/database/create_test.go
index a313faf858..7c8f76add1 100644
--- a/src/query/api/v1/handler/database/create_test.go
+++ b/src/query/api/v1/handler/database/create_test.go
@@ -1132,7 +1132,8 @@ func TestClusterTypeMissingHostnames(t *testing.T) {
assert.Equal(t,
xtest.MustPrettyJSONMap(t,
xjson.Map{
- "error": "missing required field",
+ "status": "error",
+ "error": "missing required field",
},
),
xtest.MustPrettyJSONString(t, string(body)))
@@ -1167,7 +1168,8 @@ func TestBadType(t *testing.T) {
assert.Equal(t,
xtest.MustPrettyJSONMap(t,
xjson.Map{
- "error": "invalid database type",
+ "status": "error",
+ "error": "invalid database type",
},
),
xtest.MustPrettyJSONString(t, string(body)))
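
The switch away from byte-exact `assert.Equal` matters because `assert.JSONEq` unmarshals both arguments and compares the resulting values, so key order, whitespace, and the encoder's trailing newline no longer break assertions when a field such as `status` is added. A standalone illustration (not part of this change):

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestJSONEqIgnoresOrderAndWhitespace(t *testing.T) {
	expected := `{"status":"error","error":"missing required field"}`
	// Different key order plus the encoder's trailing newline.
	actual := "{\"error\":\"missing required field\",\"status\":\"error\"}\n"
	assert.JSONEq(t, expected, actual)
}
```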
diff --git a/src/query/api/v1/handler/graphite/render.go b/src/query/api/v1/handler/graphite/render.go
index 2ebe317b21..63055fdbf0 100644
--- a/src/query/api/v1/handler/graphite/render.go
+++ b/src/query/api/v1/handler/graphite/render.go
@@ -71,8 +71,10 @@ func NewRenderHandler(opts options.HandlerOptions) http.Handler {
wrappedStore := graphite.NewM3WrappedStorage(opts.Storage(),
opts.M3DBOptions(), opts.InstrumentOpts(), opts.GraphiteStorageOptions())
return &renderHandler{
- opts: opts,
- engine: native.NewEngine(wrappedStore),
+ opts: opts,
+ engine: native.NewEngine(wrappedStore, native.CompileOptions{
+ EscapeAllNotOnlyQuotes: opts.GraphiteStorageOptions().CompileEscapeAllNotOnlyQuotes,
+ }),
queryContextOpts: opts.QueryContextOptions(),
graphiteOpts: opts.GraphiteStorageOptions(),
}
diff --git a/src/query/api/v1/handler/namespace/add_test.go b/src/query/api/v1/handler/namespace/add_test.go
index 0f26d83596..74f1bf7acd 100644
--- a/src/query/api/v1/handler/namespace/add_test.go
+++ b/src/query/api/v1/handler/namespace/add_test.go
@@ -100,7 +100,9 @@ func TestNamespaceAddHandler(t *testing.T) {
body, err := ioutil.ReadAll(resp.Body)
assert.NoError(t, err)
assert.Equal(t, http.StatusBadRequest, resp.StatusCode)
- assert.Equal(t, "{\"error\":\"bad namespace metadata: retention options must be set\"}\n", string(body))
+ assert.JSONEq(t,
+ `{"status":"error","error":"bad namespace metadata: retention options must be set"}`,
+ string(body))
// Test good case. Note: there is no way to tell the difference between a boolean
// being false and it not being set by a user.
diff --git a/src/query/api/v1/handler/namespace/delete_test.go b/src/query/api/v1/handler/namespace/delete_test.go
index 08fd5bb1b0..268b9dcbe2 100644
--- a/src/query/api/v1/handler/namespace/delete_test.go
+++ b/src/query/api/v1/handler/namespace/delete_test.go
@@ -56,7 +56,9 @@ func TestNamespaceDeleteHandlerNotFound(t *testing.T) {
resp := w.Result()
body, _ := ioutil.ReadAll(resp.Body)
assert.Equal(t, http.StatusNotFound, resp.StatusCode)
- assert.Equal(t, "{\"error\":\"unable to find a namespace with specified name\"}\n", string(body))
+ assert.JSONEq(t,
+ `{"status":"error","error":"unable to find a namespace with specified name"}`,
+ string(body))
}
func TestNamespaceDeleteHandlerDeleteAll(t *testing.T) {
diff --git a/src/query/api/v1/handler/namespace/schema_test.go b/src/query/api/v1/handler/namespace/schema_test.go
index 80165d2e5d..e5ceaeefd6 100644
--- a/src/query/api/v1/handler/namespace/schema_test.go
+++ b/src/query/api/v1/handler/namespace/schema_test.go
@@ -158,7 +158,7 @@ func TestSchemaDeploy_KVKeyNotFound(t *testing.T) {
body, err := ioutil.ReadAll(resp.Body)
assert.NoError(t, err)
assert.Equal(t, http.StatusNotFound, resp.StatusCode)
- assert.Equal(t, "{\"error\":\"namespace is not found\"}\n", string(body))
+ assert.JSONEq(t, `{"status":"error","error":"namespace is not found"}`, string(body))
}
func TestSchemaDeploy(t *testing.T) {
@@ -250,7 +250,7 @@ func TestSchemaDeploy_NamespaceNotFound(t *testing.T) {
resp := w.Result()
body, _ := ioutil.ReadAll(resp.Body)
assert.Equal(t, http.StatusNotFound, resp.StatusCode)
- assert.Equal(t, "{\"error\":\"namespace is not found\"}\n", string(body))
+ assert.JSONEq(t, `{"status":"error","error":"namespace is not found"}`, string(body))
}
func TestSchemaReset(t *testing.T) {
diff --git a/src/query/api/v1/handler/namespace/update_test.go b/src/query/api/v1/handler/namespace/update_test.go
index 38ba2282ab..30f4aa4261 100644
--- a/src/query/api/v1/handler/namespace/update_test.go
+++ b/src/query/api/v1/handler/namespace/update_test.go
@@ -103,7 +103,9 @@ func TestNamespaceUpdateHandler(t *testing.T) {
body, err := ioutil.ReadAll(resp.Body)
assert.NoError(t, err)
assert.Equal(t, http.StatusBadRequest, resp.StatusCode)
- assert.Equal(t, "{\"error\":\"unable to validate update request: update options cannot be empty\"}\n", string(body))
+ assert.JSONEq(t,
+ `{"status":"error","error":"unable to validate update request: update options cannot be empty"}`,
+ string(body))
// Test good case. Note: there is no way to tell the difference between a boolean
// being false and it not being set by a user.
diff --git a/src/query/api/v1/handler/placement/add_test.go b/src/query/api/v1/handler/placement/add_test.go
index 2d66ddc6f7..7526fa4236 100644
--- a/src/query/api/v1/handler/placement/add_test.go
+++ b/src/query/api/v1/handler/placement/add_test.go
@@ -78,7 +78,9 @@ func TestPlacementAddHandler_Force(t *testing.T) {
resp := w.Result()
body, _ := ioutil.ReadAll(resp.Body)
- assert.Equal(t, "{\"error\":\"no new instances found in the valid zone\"}\n", string(body))
+ assert.JSONEq(t,
+ `{"status":"error","error":"no new instances found in the valid zone"}`,
+ string(body))
assert.Equal(t, http.StatusInternalServerError, resp.StatusCode)
// Test add success
@@ -137,7 +139,9 @@ func TestPlacementAddHandler_SafeErr_NoNewInstance(t *testing.T) {
resp := w.Result()
body, _ := ioutil.ReadAll(resp.Body)
assert.Equal(t, http.StatusInternalServerError, resp.StatusCode)
- assert.Equal(t, "{\"error\":\"no new instances found in the valid zone\"}\n", string(body))
+ assert.JSONEq(t,
+ `{"status":"error","error":"no new instances found in the valid zone"}`,
+ string(body))
})
}
@@ -174,8 +178,8 @@ func TestPlacementAddHandler_SafeErr_NotAllAvailable(t *testing.T) {
resp := w.Result()
body, _ := ioutil.ReadAll(resp.Body)
assert.Equal(t, http.StatusBadRequest, resp.StatusCode)
- assert.Equal(t,
- `{"error":"instances do not have all shards available: [A, B]"}`+"\n",
+ assert.JSONEq(t,
+ `{"status":"error","error":"instances do not have all shards available: [A, B]"}`,
string(body))
})
}
@@ -243,7 +247,7 @@ func TestPlacementAddHandler_SafeOK(t *testing.T) {
resp := w.Result()
body, _ := ioutil.ReadAll(resp.Body)
require.Equal(t, http.StatusInternalServerError, resp.StatusCode)
- require.Equal(t, `{"error":"test err"}`+"\n", string(body))
+ require.JSONEq(t, `{"status":"error","error":"test err"}`, string(body))
w = httptest.NewRecorder()
if serviceName == handleroptions.M3AggregatorServiceName {
diff --git a/src/query/api/v1/handler/placement/delete_test.go b/src/query/api/v1/handler/placement/delete_test.go
index 01402b4514..b8c5027d88 100644
--- a/src/query/api/v1/handler/placement/delete_test.go
+++ b/src/query/api/v1/handler/placement/delete_test.go
@@ -134,7 +134,7 @@ func TestPlacementDeleteHandler_Force(t *testing.T) {
body, err = ioutil.ReadAll(resp.Body)
require.NoError(t, err)
require.Equal(t, http.StatusNotFound, resp.StatusCode)
- require.Equal(t, "{\"error\":\"instance not found: nope\"}\n", string(body))
+ require.JSONEq(t, `{"status":"error","error":"instance not found: nope"}`, string(body))
})
}
@@ -267,7 +267,9 @@ func testDeleteHandlerSafe(t *testing.T, serviceName string) {
body, err = ioutil.ReadAll(resp.Body)
require.NoError(t, err)
require.Equal(t, http.StatusBadRequest, resp.StatusCode)
- require.Equal(t, `{"error":"instances do not have all shards available: [host2]"}`+"\n", string(body))
+ require.JSONEq(t,
+ `{"status":"error","error":"instances do not have all shards available: [host2]"}`,
+ string(body))
}
// Test OK
diff --git a/src/query/api/v1/handler/placement/init_test.go b/src/query/api/v1/handler/placement/init_test.go
index 3bec2ce906..eef51e58c5 100644
--- a/src/query/api/v1/handler/placement/init_test.go
+++ b/src/query/api/v1/handler/placement/init_test.go
@@ -135,7 +135,9 @@ func TestPlacementInitHandler(t *testing.T) {
body, err = ioutil.ReadAll(resp.Body)
require.NoError(t, err)
assert.Equal(t, http.StatusInternalServerError, resp.StatusCode)
- assert.Equal(t, "{\"error\":\"unable to build initial placement\"}\n", string(body))
+ assert.JSONEq(t,
+ `{"status":"error","error":"unable to build initial placement"}`,
+ string(body))
// Test error response
w = httptest.NewRecorder()
diff --git a/src/query/api/v1/handler/placement/replace_test.go b/src/query/api/v1/handler/placement/replace_test.go
index e45ff85a9d..a4d44ef7a7 100644
--- a/src/query/api/v1/handler/placement/replace_test.go
+++ b/src/query/api/v1/handler/placement/replace_test.go
@@ -91,7 +91,7 @@ func testPlacementReplaceHandlerForce(t *testing.T, serviceName string) {
resp := w.Result()
body, _ := ioutil.ReadAll(resp.Body)
- assert.Equal(t, `{"error":"test"}`+"\n", string(body))
+ assert.JSONEq(t, `{"status":"error","error":"test"}`, string(body))
assert.Equal(t, http.StatusInternalServerError, resp.StatusCode)
w = httptest.NewRecorder()
@@ -136,7 +136,9 @@ func testPlacementReplaceHandlerSafeErr(t *testing.T, serviceName string) {
assert.Equal(t, http.StatusOK, resp.StatusCode)
default:
assert.Equal(t, http.StatusBadRequest, resp.StatusCode)
- assert.Equal(t, `{"error":"instances do not have all shards available: [A, B]"}`+"\n", string(body))
+ assert.JSONEq(t,
+ `{"status":"error","error":"instances do not have all shards available: [A, B]"}`,
+ string(body))
}
}
diff --git a/src/query/api/v1/handler/prom/common.go b/src/query/api/v1/handler/prom/common.go
index 7c4a66a0d6..7ef1d039a1 100644
--- a/src/query/api/v1/handler/prom/common.go
+++ b/src/query/api/v1/handler/prom/common.go
@@ -89,19 +89,3 @@ func Respond(w http.ResponseWriter, data interface{}, warnings promstorage.Warni
w.WriteHeader(http.StatusOK)
w.Write(b)
}
-
-// Responds with error status code and writes error JSON to response body.
-func RespondError(w http.ResponseWriter, err error) {
- json := jsoniter.ConfigCompatibleWithStandardLibrary
- b, marshalErr := json.Marshal(&response{
- Status: statusError,
- Error: err.Error(),
- })
- if marshalErr != nil {
- xhttp.WriteError(w, marshalErr)
- return
- }
-
- w.Header().Set(xhttp.HeaderContentType, xhttp.ContentTypeJSON)
- xhttp.WriteError(w, err, xhttp.WithErrorResponse(b))
-}
diff --git a/src/query/api/v1/handler/prom/read.go b/src/query/api/v1/handler/prom/read.go
index b69cdecc39..7b89fcbf67 100644
--- a/src/query/api/v1/handler/prom/read.go
+++ b/src/query/api/v1/handler/prom/read.go
@@ -34,6 +34,7 @@ import (
"github.com/m3db/m3/src/query/models"
"github.com/m3db/m3/src/query/storage/prometheus"
xerrors "github.com/m3db/m3/src/x/errors"
+ xhttp "github.com/m3db/m3/src/x/net/http"
"github.com/prometheus/prometheus/promql"
promstorage "github.com/prometheus/prometheus/storage"
@@ -99,13 +100,13 @@ func (h *readHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
fetchOptions, err := h.hOpts.FetchOptionsBuilder().NewFetchOptions(r)
if err != nil {
- RespondError(w, err)
+ xhttp.WriteError(w, err)
return
}
request, err := native.ParseRequest(ctx, r, h.opts.instant, h.hOpts)
if err != nil {
- RespondError(w, err)
+ xhttp.WriteError(w, err)
return
}
@@ -129,7 +130,7 @@ func (h *readHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
h.logger.Error("error creating query",
zap.Error(err), zap.String("query", params.Query),
zap.Bool("instant", h.opts.instant))
- RespondError(w, xerrors.NewInvalidParamsError(err))
+ xhttp.WriteError(w, xerrors.NewInvalidParamsError(err))
return
}
defer qry.Close()
@@ -139,7 +140,7 @@ func (h *readHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
h.logger.Error("error executing query",
zap.Error(res.Err), zap.String("query", params.Query),
zap.Bool("instant", h.opts.instant))
- RespondError(w, res.Err)
+ xhttp.WriteError(w, res.Err)
return
}
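
With the package-local `RespondError` removed, the prom handlers go through the shared `xhttp.WriteError`, so every handler emits the same `{"status":"error",...}` envelope and status-code mapping. A hedged sketch of the resulting handler pattern; `parseParams` and `handle` are hypothetical stand-ins:

```go
package example

import (
	"errors"
	"net/http"

	xerrors "github.com/m3db/m3/src/x/errors"
	xhttp "github.com/m3db/m3/src/x/net/http"
)

// parseParams is a hypothetical stand-in for request parsing.
func parseParams(r *http.Request) error {
	if r.URL.Query().Get("query") == "" {
		return errors.New("query is required")
	}
	return nil
}

func handle(w http.ResponseWriter, r *http.Request) {
	if err := parseParams(r); err != nil {
		// Wrapping as an invalid-params error steers the status code
		// toward 4xx; other errors default to 500. The body is always
		// {"status":"error","error":"..."}.
		xhttp.WriteError(w, xerrors.NewInvalidParamsError(err))
		return
	}
	w.WriteHeader(http.StatusOK)
}
```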
diff --git a/src/query/api/v1/handler/prometheus/native/list_tags_test.go b/src/query/api/v1/handler/prometheus/native/list_tags_test.go
index 0ecc6dfa04..f33ef27b18 100644
--- a/src/query/api/v1/handler/prometheus/native/list_tags_test.go
+++ b/src/query/api/v1/handler/prometheus/native/list_tags_test.go
@@ -22,7 +22,6 @@ package native
import (
"errors"
- "fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
@@ -179,9 +178,6 @@ func TestListErrorTags(t *testing.T) {
r, err := ioutil.ReadAll(body)
require.NoError(t, err)
- ex := `{"error":"err"}`
- // NB: error handler adds a newline to the output.
- ex = fmt.Sprintf("%s\n", ex)
- require.Equal(t, ex, string(r))
+ require.JSONEq(t, `{"status":"error","error":"err"}`, string(r))
}
}
diff --git a/src/query/api/v1/handler/prometheus/remote/tag_values_test.go b/src/query/api/v1/handler/prometheus/remote/tag_values_test.go
index 89a799f39c..120d158fdf 100644
--- a/src/query/api/v1/handler/prometheus/remote/tag_values_test.go
+++ b/src/query/api/v1/handler/prometheus/remote/tag_values_test.go
@@ -204,6 +204,6 @@ func TestTagValueErrors(t *testing.T) {
read, err := ioutil.ReadAll(rr.Body)
require.NoError(t, err)
- ex := fmt.Sprintf(`{"error":"invalid path with no name present"}%s`, "\n")
- assert.Equal(t, ex, string(read))
+ ex := `{"status":"error","error":"invalid path with no name present"}`
+ assert.JSONEq(t, ex, string(read))
}
diff --git a/src/query/api/v1/handler/prometheus/remote/write.go b/src/query/api/v1/handler/prometheus/remote/write.go
index 6850e4e8eb..7e11b60130 100644
--- a/src/query/api/v1/handler/prometheus/remote/write.go
+++ b/src/query/api/v1/handler/prometheus/remote/write.go
@@ -215,63 +215,22 @@ func (m *promWriteMetrics) incError(err error) {
}
func newPromWriteMetrics(scope tally.Scope) (promWriteMetrics, error) {
- upTo1sBuckets, err := tally.LinearDurationBuckets(0, 100*time.Millisecond, 10)
+ buckets, err := ingest.NewLatencyBuckets()
if err != nil {
return promWriteMetrics{}, err
}
-
- upTo10sBuckets, err := tally.LinearDurationBuckets(time.Second, 500*time.Millisecond, 18)
- if err != nil {
- return promWriteMetrics{}, err
- }
-
- upTo60sBuckets, err := tally.LinearDurationBuckets(10*time.Second, 5*time.Second, 11)
- if err != nil {
- return promWriteMetrics{}, err
- }
-
- upTo60mBuckets, err := tally.LinearDurationBuckets(0, 5*time.Minute, 12)
- if err != nil {
- return promWriteMetrics{}, err
- }
- upTo60mBuckets = upTo60mBuckets[1:] // Remove the first 0s to get 5 min aligned buckets
-
- upTo6hBuckets, err := tally.LinearDurationBuckets(time.Hour, 30*time.Minute, 12)
- if err != nil {
- return promWriteMetrics{}, err
- }
-
- upTo24hBuckets, err := tally.LinearDurationBuckets(6*time.Hour, time.Hour, 19)
- if err != nil {
- return promWriteMetrics{}, err
- }
- upTo24hBuckets = upTo24hBuckets[1:] // Remove the first 6h to get 1 hour aligned buckets
-
- var writeLatencyBuckets tally.DurationBuckets
- writeLatencyBuckets = append(writeLatencyBuckets, upTo1sBuckets...)
- writeLatencyBuckets = append(writeLatencyBuckets, upTo10sBuckets...)
- writeLatencyBuckets = append(writeLatencyBuckets, upTo60sBuckets...)
- writeLatencyBuckets = append(writeLatencyBuckets, upTo60mBuckets...)
-
- var ingestLatencyBuckets tally.DurationBuckets
- ingestLatencyBuckets = append(ingestLatencyBuckets, upTo1sBuckets...)
- ingestLatencyBuckets = append(ingestLatencyBuckets, upTo10sBuckets...)
- ingestLatencyBuckets = append(ingestLatencyBuckets, upTo60sBuckets...)
- ingestLatencyBuckets = append(ingestLatencyBuckets, upTo60mBuckets...)
- ingestLatencyBuckets = append(ingestLatencyBuckets, upTo6hBuckets...)
- ingestLatencyBuckets = append(ingestLatencyBuckets, upTo24hBuckets...)
return promWriteMetrics{
writeSuccess: scope.SubScope("write").Counter("success"),
writeErrorsServer: scope.SubScope("write").Tagged(map[string]string{"code": "5XX"}).Counter("errors"),
writeErrorsClient: scope.SubScope("write").Tagged(map[string]string{"code": "4XX"}).Counter("errors"),
- writeBatchLatency: scope.SubScope("write").Histogram("batch-latency", writeLatencyBuckets),
- writeBatchLatencyBuckets: writeLatencyBuckets,
- ingestLatency: scope.SubScope("ingest").Histogram("latency", ingestLatencyBuckets),
- ingestLatencyBuckets: ingestLatencyBuckets,
+ writeBatchLatency: scope.SubScope("write").Histogram("batch-latency", buckets.WriteLatencyBuckets),
+ writeBatchLatencyBuckets: buckets.WriteLatencyBuckets,
+ ingestLatency: scope.SubScope("ingest").Histogram("latency", buckets.IngestLatencyBuckets),
+ ingestLatencyBuckets: buckets.IngestLatencyBuckets,
forwardSuccess: scope.SubScope("forward").Counter("success"),
forwardErrors: scope.SubScope("forward").Counter("errors"),
forwardDropped: scope.SubScope("forward").Counter("dropped"),
- forwardLatency: scope.SubScope("forward").Histogram("latency", writeLatencyBuckets),
+ forwardLatency: scope.SubScope("forward").Histogram("latency", buckets.WriteLatencyBuckets),
}, nil
}
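
The six-range bucket construction deleted above moves behind `ingest.NewLatencyBuckets`. Judging from how the result is used here, it presumably returns a struct carrying both bucket sets, assembled much as the deleted code did. A rough, abridged reconstruction inferred from that code, not necessarily the real implementation:

```go
package ingest

import (
	"time"

	"github.com/uber-go/tally"
)

// LatencyBuckets carries the shared histogram buckets. Field names match
// their usage in newPromWriteMetrics; everything else is inferred.
type LatencyBuckets struct {
	WriteLatencyBuckets  tally.DurationBuckets
	IngestLatencyBuckets tally.DurationBuckets
}

func NewLatencyBuckets() (LatencyBuckets, error) {
	upTo1s, err := tally.LinearDurationBuckets(0, 100*time.Millisecond, 10)
	if err != nil {
		return LatencyBuckets{}, err
	}
	upTo10s, err := tally.LinearDurationBuckets(time.Second, 500*time.Millisecond, 18)
	if err != nil {
		return LatencyBuckets{}, err
	}
	// ... the 60s, 60m, 6h and 24h ranges are built the same way ...

	var write tally.DurationBuckets
	write = append(write, upTo1s...)
	write = append(write, upTo10s...)

	// Ingest latency reuses the write buckets and extends into hours.
	ingestBuckets := append(tally.DurationBuckets(nil), write...)
	return LatencyBuckets{
		WriteLatencyBuckets:  write,
		IngestLatencyBuckets: ingestBuckets,
	}, nil
}
```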
diff --git a/src/query/api/v1/handler/topic/init_test.go b/src/query/api/v1/handler/topic/init_test.go
index 78085bec2b..7e33e8d8ad 100644
--- a/src/query/api/v1/handler/topic/init_test.go
+++ b/src/query/api/v1/handler/topic/init_test.go
@@ -98,5 +98,5 @@ func TestPlacementInitHandler(t *testing.T) {
body, err = ioutil.ReadAll(resp.Body)
require.NoError(t, err)
require.Equal(t, http.StatusInternalServerError, resp.StatusCode)
- require.Equal(t, "{\"error\":\"init error\"}\n", string(body))
+ require.JSONEq(t, `{"status":"error","error":"init error"}`, string(body))
}
diff --git a/src/query/generated/assets/openapi/assets.go b/src/query/generated/assets/openapi/assets.go
index 2f4409dffe..4f5e461d9c 100644
--- a/src/query/generated/assets/openapi/assets.go
+++ b/src/query/generated/assets/openapi/assets.go
@@ -239,7 +239,7 @@ d8dUBmQZxiF/+uI1I7E8TMF6pCyWxDpY7WPA8pmKZsl6ouawOaOS+Sj+BQAA//8by2IcfAIAAA==
"/spec.yml": {
local: "openapi/spec.yml",
- size: 22779,
+ size: 22816,
modtime: 12345,
compressed: `
H4sIAAAAAAAC/+xcX2/bOBJ/96fgqvdw+5A4bXp7gN+cOJsaSNMgyS5wuzhgaXIkc1ciVXKYNF3cdz9Q
@@ -275,9 +275,9 @@ FK7RbZ4sol8Ss+4Ap7xuumWQuqSNN3CGMCpMVkXya0sNwl+VbOpjH0EEc2wKGkgeKyGxQZmpzirVmuY3
ewhRM8KSEBcUN8bb/WU/7VRvaaw0tkld973D953B5wlf4fR5n7FsEZ6XdTxB7W4eriXOIEVoIJu0HGHu
PMcoqxlMm+K3qJvXVCqzbeF0Onx/axUr2wthy9sJ0kbpzSPiTa+n99Px1fS36fWlt7w4/nU8vRqfXV1k
V64uxr8uJCper9gLkW5V1tZWRraJVZpBq7Yl94WMb86L1sRe1eXk2gg3SbtWorZdKj7H7xCtEOiDkME0
-O2nvHrbq5UYlF5zitwimrWv0syMrfwzZZce6kq9MSOXJ8Tabo+tm2kguDhpO4FZlblHDQsVo6OWvsNAa
-3KKRftb1VjiUqWzBNxzFNLDa2VIuz9F7gtkHlWLrrGhLq8Rjs4vwJQaGwO+SH351SEt6EHMD+oOyehue
-/KD224ZSzjUYs2O/84oN7f5DXP0kZ5uS0PagpuJRaOcDhoKO/wcAAP//ae6OF/tYAAA=
+O2nvHrbq5UYlF5zitwimrWv0syMrfwzZwTBXOK1pYoeV0kqZyuPlbXZQ183cklwcNBzTrWrhotCFitHQ
+y19hoTW4Rbf9rIuycHJT2advOK9poL6zpVyeyPeExQ8qBeBZ0ZZWicdmF+FLDAyB3yW/DuuQljQq5gb0
+B2X1NmT6Qe23V6WcazBmx6boFbve/Ye4+nHPNiWh7WlOxfPSzqcQBR3/DwAA//8beZKQIFkAAA==
`,
},
diff --git a/src/query/generated/assets/openapi/spec.yml b/src/query/generated/assets/openapi/spec.yml
index b7fe173fb4..7a0194e5d9 100644
--- a/src/query/generated/assets/openapi/spec.yml
+++ b/src/query/generated/assets/openapi/spec.yml
@@ -856,6 +856,8 @@ definitions:
GenericError:
type: "object"
properties:
+ status:
+ type: "string"
error:
type: "string"
DatabaseCreateRequest:
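
The schema addition mirrors the updated `ErrorResponse` struct in `src/x/net/http/errors.go`: every error body now carries both fields. A small round-trip check (standalone example):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// GenericError mirrors the OpenAPI definition and xhttp.ErrorResponse.
type GenericError struct {
	Status string `json:"status"`
	Error  string `json:"error"`
}

func main() {
	b, _ := json.Marshal(GenericError{Status: "error", Error: "namespace is not found"})
	fmt.Println(string(b)) // {"status":"error","error":"namespace is not found"}
}
```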
diff --git a/src/query/graphite/lexer/lexer.go b/src/query/graphite/lexer/lexer.go
index 2f4597db58..02dfb28538 100644
--- a/src/query/graphite/lexer/lexer.go
+++ b/src/query/graphite/lexer/lexer.go
@@ -134,16 +134,27 @@ type Lexer struct {
pos int
width int
reservedIdentifiers map[string]TokenType
+ opts Options
}
const (
eof rune = 0
)
+// Options allows for specifying lexer options.
+type Options struct {
+ EscapeAllNotOnlyQuotes bool
+}
+
// NewLexer returns a lexer and an output channel for tokens.
-func NewLexer(s string, reservedIdentifiers map[string]TokenType) (*Lexer, chan *Token) {
+func NewLexer(s string, reservedIdentifiers map[string]TokenType, opts Options) (*Lexer, chan *Token) {
tokens := make(chan *Token)
- return &Lexer{s: s, tokens: tokens, reservedIdentifiers: reservedIdentifiers}, tokens
+ return &Lexer{
+ s: s,
+ tokens: tokens,
+ reservedIdentifiers: reservedIdentifiers,
+ opts: opts,
+ }, tokens
}
// Run consumes the input to produce a token stream.
@@ -377,8 +388,18 @@ func (l *Lexer) quotedString(quoteMark rune) bool {
continue
}
- if escaped && strings.ContainsRune(digits, r) {
- // if backslash is followed by a digit, we add the backslash back
+		// By default escaping is only needed for quotes; backslashes are
+		// treated as regular backslashes (e.g. for use in regexps passed to
+		// aliasSub, etc.) and so a backslash is restored as long as it is
+		// not escaping a quote.
+ restoreBackslash := escaped && r != quoteMark
+ if l.opts.EscapeAllNotOnlyQuotes {
+			// If escaping all characters, not just quotes, then only restore
+			// the backslash when used for regex group replacement (i.e. "\1").
+ restoreBackslash = escaped && strings.ContainsRune(digits, r)
+ }
+ if restoreBackslash {
+			// Keep the backslash when it is not being used to escape a quote.
s = append(s, '\\')
}
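
In other words, whether a backslash survives lexing now depends on the option. A standalone sketch of just that decision, mirroring the branch above rather than the actual lexer API:

```go
package example

import "strings"

const digits = "0123456789"

// keepBackslash mirrors the branch in (*Lexer).quotedString: the previous
// rune was a backslash, r is the rune that follows it, and quoteMark is
// the string's delimiter.
func keepBackslash(escapeAllNotOnlyQuotes bool, r, quoteMark rune) bool {
	if escapeAllNotOnlyQuotes {
		// Only regex group references such as "\1" keep the backslash.
		return strings.ContainsRune(digits, r)
	}
	// Default: keep the backslash unless it escapes the quote character.
	return r != quoteMark
}
```

With defaults, `"a\d"` therefore lexes to the string `a\d` (handy for regex arguments to aliasSub) while `"a\""` lexes to `a"`; with the option enabled, `"a\d"` lexes to `ad` but `"a\1"` keeps its backslash.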
diff --git a/src/query/graphite/lexer/lexer_test.go b/src/query/graphite/lexer/lexer_test.go
index 2a3ac53a22..4dcb00dae3 100644
--- a/src/query/graphite/lexer/lexer_test.go
+++ b/src/query/graphite/lexer/lexer_test.go
@@ -64,13 +64,13 @@ func TestLexer(t *testing.T) {
// no need to escape single quotes within double quoted string
{"\"Whatever 'man'\"", []Token{{String, "Whatever 'man'"}}, nil},
// escape double quotes within double quoted string
- {"\"Whatever \\\"man\\\"\"", []Token{{String, "Whatever \"man\""}}, nil},
+ {`"Whatever \"man\""`, []Token{{String, "Whatever \"man\""}}, nil},
{"'Whatever man'", []Token{{String, "Whatever man"}}, nil}, // single quoted string
// no need to escape double quote within single quoted strings (start boundary), but may
// do it if so desired (end boundary)
- {"'Whatever \"man\\\"'", []Token{{String, "Whatever \"man\""}}, nil},
+ {`'Whatever "man\"'`, []Token{{String, "Whatever \"man\\\""}}, nil},
{" call09(a.{b,c,d}.node[0-2].qux.*, a{e,g}, 4546.abc, 45ahj, " +
- "\"Hello there \\\"Good \\\\ Sir\\\" \", +20, 39540.459,-349845,.393) ",
+ `"Hello there \"Good \ Sir\" ", +20, 39540.459,-349845,.393) `,
[]Token{
{Identifier, "call09"},
{LParenthesis, "("},
@@ -122,7 +122,7 @@ func TestLexer(t *testing.T) {
}
for _, test := range lexerTestInput {
- lex, tokens := NewLexer(test.input, test.reservedIdents)
+ lex, tokens := NewLexer(test.input, test.reservedIdents, Options{})
go lex.Run()
i := 0
@@ -156,7 +156,7 @@ func TestLexerErrors(t *testing.T) {
}
for _, badLine := range badLines {
- l, tokens := NewLexer(badLine[0], nil)
+ l, tokens := NewLexer(badLine[0], nil, Options{})
go l.Run()
expected := &Token{Error, badLine[1]}
diff --git a/src/query/graphite/native/aggregation_functions.go b/src/query/graphite/native/aggregation_functions.go
index a0cd8e569c..ac72670ae8 100644
--- a/src/query/graphite/native/aggregation_functions.go
+++ b/src/query/graphite/native/aggregation_functions.go
@@ -88,6 +88,13 @@ func minSeries(ctx *common.Context, series multiplePathSpecs) (ts.SeriesList, er
return combineSeries(ctx, series, wrapPathExpr(minSeriesFnName, ts.SeriesList(series)), ts.Min)
}
+// powSeries takes a list of series and returns a new series where each datapoint is
+// the first series' value raised to the power of each subsequent series' value in turn
+// nolint: gocritic
+func powSeries(ctx *common.Context, series multiplePathSpecs) (ts.SeriesList, error) {
+ return combineSeries(ctx, series, wrapPathExpr(powSeriesFnName, ts.SeriesList(series)), ts.Pow)
+}
+
// maxSeries takes a list of series and returns a new series containing the
// maximum value across the series at each datapoint
func maxSeries(ctx *common.Context, series multiplePathSpecs) (ts.SeriesList, error) {
@@ -431,7 +438,10 @@ func chunkArrayHelper(slice []string, numChunks int) [][]string {
}
func evaluateTarget(ctx *common.Context, target string) (ts.SeriesList, error) {
- eng := NewEngine(ctx.Engine.Storage())
+ eng, ok := ctx.Engine.(*Engine)
+ if !ok {
+		return ts.NewSeriesList(), fmt.Errorf("engine is not a native engine")
+ }
expression, err := eng.Compile(target)
if err != nil {
return ts.NewSeriesList(), err
diff --git a/src/query/graphite/native/aggregation_functions_test.go b/src/query/graphite/native/aggregation_functions_test.go
index cfdd9d1cd5..0510e0f728 100644
--- a/src/query/graphite/native/aggregation_functions_test.go
+++ b/src/query/graphite/native/aggregation_functions_test.go
@@ -140,7 +140,7 @@ func TestStdDevSeries(t *testing.T) {
var (
ctrl = xgomock.NewController(t)
store = storage.NewMockStorage(ctrl)
- engine = NewEngine(store)
+ engine = NewEngine(store, CompileOptions{})
start, _ = time.Parse(time.RFC1123, "Mon, 27 Jul 2015 19:41:19 GMT")
end, _ = time.Parse(time.RFC1123, "Mon, 27 Jul 2015 19:43:19 GMT")
ctx = common.NewContext(common.ContextOptions{Start: start, End: end, Engine: engine})
@@ -166,6 +166,38 @@
common.CompareOutputsAndExpected(t, 60000, start, expectedResults, result.Values)
}
+func TestPowSeries(t *testing.T) {
+ var (
+ ctrl = xgomock.NewController(t)
+ store = storage.NewMockStorage(ctrl)
+ now = time.Now().Truncate(time.Hour)
+ engine = NewEngine(store, CompileOptions{})
+ startTime = now.Add(-3 * time.Minute)
+ endTime = now.Add(-time.Minute)
+ ctx = common.NewContext(common.ContextOptions{
+ Start: startTime,
+ End: endTime,
+ Engine: engine,
+ })
+ )
+
+ fakeSeries1 := ts.NewSeries(ctx, "foo.bar.g.zed.g", startTime,
+ common.NewTestSeriesValues(ctx, 60000, []float64{0, 1, 2, 3, 4}))
+ fakeSeries2 := ts.NewSeries(ctx, "foo.bar.g.zed.g", startTime,
+ common.NewTestSeriesValues(ctx, 60000, []float64{2, 4, 1, 3, 3}))
+ fakeSeries3 := ts.NewSeries(ctx, "foo.bar.g.zed.g", startTime,
+ common.NewTestSeriesValues(ctx, 60000, []float64{5, 4, 3, 2, 1}))
+
+ listOfFakeSeries := []*ts.Series{fakeSeries1, fakeSeries2, fakeSeries3}
+
+ expectedValues := []float64{0, 1, 8, 729, 64}
+ result, err := powSeries(ctx, multiplePathSpecs(singlePathSpec{Values: listOfFakeSeries}))
+	require.NoError(t, err)
+ for i := 0; i < result.Values[0].Len(); i++ {
+		require.Equal(t, expectedValues[i], result.Values[0].ValueAt(i))
+ }
+}
+
func TestAggregate(t *testing.T) {
testAggregatedSeries(t, func(ctx *common.Context, series multiplePathSpecs) (ts.SeriesList, error) {
return aggregate(ctx, singlePathSpec(series), "sum")
@@ -180,7 +214,7 @@ func TestAggregateSeriesMedian(t *testing.T) {
var (
ctrl = xgomock.NewController(t)
store = storage.NewMockStorage(ctrl)
- engine = NewEngine(store)
+ engine = NewEngine(store, CompileOptions{})
start, _ = time.Parse(time.RFC1123, "Mon, 27 Jul 2015 19:41:19 GMT")
end, _ = time.Parse(time.RFC1123, "Mon, 27 Jul 2015 19:43:19 GMT")
ctx = common.NewContext(common.ContextOptions{Start: start, End: end, Engine: engine})
@@ -231,7 +265,7 @@ func (e mockEngine) Storage() storage.Storage {
}
func TestVariadicSumSeries(t *testing.T) {
- expr, err := Compile("sumSeries(foo.bar.*, foo.baz.*)")
+ expr, err := Compile("sumSeries(foo.bar.*, foo.baz.*)", CompileOptions{})
require.NoError(t, err)
ctx := common.NewTestContext()
ctx.Engine = mockEngine{fn: func(
@@ -489,7 +523,7 @@ func TestApplyByNode(t *testing.T) {
var (
ctrl = xgomock.NewController(t)
store = storage.NewMockStorage(ctrl)
- engine = NewEngine(store)
+ engine = NewEngine(store, CompileOptions{})
start, _ = time.Parse(time.RFC1123, "Mon, 27 Jul 2015 19:41:19 GMT")
end, _ = time.Parse(time.RFC1123, "Mon, 27 Jul 2015 19:43:19 GMT")
ctx = common.NewContext(common.ContextOptions{Start: start, End: end, Engine: engine})
diff --git a/src/query/graphite/native/builtin_functions.go b/src/query/graphite/native/builtin_functions.go
index bc65fc74d6..2d5073208f 100644
--- a/src/query/graphite/native/builtin_functions.go
+++ b/src/query/graphite/native/builtin_functions.go
@@ -2471,6 +2471,7 @@ func init() {
MustRegisterFunction(perSecond).WithDefaultParams(map[uint8]interface{}{
2: math.NaN(), // maxValue
})
+ MustRegisterFunction(powSeries)
MustRegisterFunction(rangeOfSeries)
MustRegisterFunction(randomWalkFunction).WithDefaultParams(map[uint8]interface{}{
2: 60, // step
diff --git a/src/query/graphite/native/builtin_functions_test.go b/src/query/graphite/native/builtin_functions_test.go
index 68e0108b42..41dfcd3a36 100644
--- a/src/query/graphite/native/builtin_functions_test.go
+++ b/src/query/graphite/native/builtin_functions_test.go
@@ -307,7 +307,7 @@ func TestUseSeriesAbove(t *testing.T) {
ctrl = xgomock.NewController(t)
store = storage.NewMockStorage(ctrl)
now = time.Now().Truncate(time.Hour)
- engine = NewEngine(store)
+ engine = NewEngine(store, CompileOptions{})
startTime = now.Add(-3 * time.Minute)
endTime = now.Add(-time.Minute)
ctx = common.NewContext(common.ContextOptions{Start: startTime, End: endTime, Engine: engine})
@@ -743,14 +743,12 @@ func testMovingFunction(t *testing.T, target, expectedName string, values, boots
ctx := common.NewTestContext()
defer ctx.Close()
- engine := NewEngine(
- &common.MovingFunctionStorage{
- StepMillis: 10000,
- Bootstrap: bootstrap,
- BootstrapStart: testMovingFunctionBootstrap,
- Values: values,
- },
- )
+ engine := NewEngine(&common.MovingFunctionStorage{
+ StepMillis: 10000,
+ Bootstrap: bootstrap,
+ BootstrapStart: testMovingFunctionBootstrap,
+ Values: values,
+ }, CompileOptions{})
phonyContext := common.NewContext(common.ContextOptions{
Start: testMovingFunctionStart,
End: testMovingFunctionEnd,
@@ -783,12 +781,10 @@ func testGeneralFunction(t *testing.T, target, expectedName string, values, outp
ctx := common.NewTestContext()
defer ctx.Close()
- engine := NewEngine(
- &common.MovingFunctionStorage{
- StepMillis: 60000,
- Values: values,
- },
- )
+ engine := NewEngine(&common.MovingFunctionStorage{
+ StepMillis: 60000,
+ Values: values,
+ }, CompileOptions{})
phonyContext := common.NewContext(common.ContextOptions{
Start: testGeneralFunctionStart,
End: testGeneralFunctionEnd,
@@ -817,7 +813,7 @@ func TestCombineBootstrapWithOriginal(t *testing.T) {
ctx = common.NewContext(common.ContextOptions{
Start: contextStart,
End: contextEnd,
- Engine: NewEngine(&common.MovingFunctionStorage{}),
+ Engine: NewEngine(&common.MovingFunctionStorage{}, CompileOptions{}),
})
originalStart = time.Date(2020, time.October, 5, 1, 16, 00, 0, time.UTC)
@@ -901,14 +897,12 @@ func testMovingFunctionError(t *testing.T, target string) {
ctx := common.NewTestContext()
defer ctx.Close()
- engine := NewEngine(
- &common.MovingFunctionStorage{
- StepMillis: 10000,
- Bootstrap: []float64{1.0},
- BootstrapStart: testMovingFunctionBootstrap,
- Values: []float64{1.0},
- },
- )
+ engine := NewEngine(&common.MovingFunctionStorage{
+ StepMillis: 10000,
+ Bootstrap: []float64{1.0},
+ BootstrapStart: testMovingFunctionBootstrap,
+ Values: []float64{1.0},
+ }, CompileOptions{})
phonyContext := common.NewContext(common.ContextOptions{
Start: testMovingFunctionStart,
End: testMovingFunctionEnd,
@@ -2513,9 +2507,7 @@ func (*mockStorage) FetchByQuery(
func TestHoltWintersForecast(t *testing.T) {
ctx := common.NewTestContext()
- ctx.Engine = NewEngine(
- &mockStorage{},
- )
+ ctx.Engine = NewEngine(&mockStorage{}, CompileOptions{})
defer ctx.Close()
now := ctx.StartTime
@@ -2565,9 +2557,7 @@ func TestHoltWintersForecast(t *testing.T) {
func TestHoltWintersConfidenceBands(t *testing.T) {
ctx := common.NewTestContext()
- ctx.Engine = NewEngine(
- &mockStorage{},
- )
+ ctx.Engine = NewEngine(&mockStorage{}, CompileOptions{})
defer ctx.Close()
now := ctx.StartTime
@@ -2627,9 +2617,7 @@ func TestHoltWintersConfidenceBands(t *testing.T) {
func TestHoltWintersAberration(t *testing.T) {
ctx := common.NewTestContext()
- ctx.Engine = NewEngine(
- &mockStorage{},
- )
+ ctx.Engine = NewEngine(&mockStorage{}, CompileOptions{})
defer ctx.Close()
now := ctx.StartTime
@@ -3016,7 +3004,7 @@ func TestMovingMedian(t *testing.T) {
store := storage.NewMockStorage(ctrl)
now := time.Now().Truncate(time.Hour)
- engine := NewEngine(store)
+ engine := NewEngine(store, CompileOptions{})
startTime := now.Add(-3 * time.Minute)
endTime := now.Add(-time.Minute)
ctx := common.NewContext(common.ContextOptions{Start: startTime, End: endTime, Engine: engine})
@@ -3044,7 +3032,7 @@ func TestMovingAverage(t *testing.T) {
store := storage.NewMockStorage(ctrl)
now := time.Now().Truncate(time.Hour)
- engine := NewEngine(store)
+ engine := NewEngine(store, CompileOptions{})
startTime := now.Add(-3 * time.Minute)
endTime := now.Add(-1 * time.Minute)
ctx := common.NewContext(common.ContextOptions{Start: startTime, End: endTime, Engine: engine})
@@ -3311,7 +3299,7 @@ func TestTimeShift(t *testing.T) {
store := storage.NewMockStorage(ctrl)
now := time.Now().Truncate(time.Hour)
- engine := NewEngine(store)
+ engine := NewEngine(store, CompileOptions{})
startTime := now.Add(-3 * time.Minute)
endTime := now.Add(-time.Minute)
ctx := common.NewContext(common.ContextOptions{
@@ -3366,12 +3354,10 @@ func testDelay(t *testing.T, target, expectedName string, values, output []float
ctx := common.NewTestContext()
defer ctx.Close()
- engine := NewEngine(
- &common.MovingFunctionStorage{
- StepMillis: 10000,
- Values: values,
- },
- )
+ engine := NewEngine(&common.MovingFunctionStorage{
+ StepMillis: 10000,
+ Values: values,
+ }, CompileOptions{})
phonyContext := common.NewContext(common.ContextOptions{
Start: testDelayStart,
End: testDelayEnd,
@@ -3542,6 +3528,7 @@ func TestFunctionsRegistered(t *testing.T) {
"offset",
"offsetToZero",
"perSecond",
+ "powSeries",
"randomWalk",
"randomWalkFunction",
"rangeOfSeries",
diff --git a/src/query/graphite/native/compiler.go b/src/query/graphite/native/compiler.go
index fec31a438b..5aff74da8f 100644
--- a/src/query/graphite/native/compiler.go
+++ b/src/query/graphite/native/compiler.go
@@ -30,13 +30,20 @@ import (
"github.com/m3db/m3/src/query/graphite/lexer"
)
+// CompileOptions allows for specifying compile options.
+type CompileOptions struct {
+ EscapeAllNotOnlyQuotes bool
+}
+
// Compile converts an input stream into the corresponding Expression.
-func Compile(input string) (Expression, error) {
+func Compile(input string, opts CompileOptions) (Expression, error) {
booleanLiterals := map[string]lexer.TokenType{
"true": lexer.True,
"false": lexer.False,
}
- lex, tokens := lexer.NewLexer(input, booleanLiterals)
+ lex, tokens := lexer.NewLexer(input, booleanLiterals, lexer.Options{
+ EscapeAllNotOnlyQuotes: opts.EscapeAllNotOnlyQuotes,
+ })
go lex.Run()
lookforward := newTokenLookforward(tokens)
@@ -327,7 +334,7 @@ func (c *compiler) errorf(msg string, args ...interface{}) error {
// ExtractFetchExpressions extracts timeseries fetch expressions from the given query
func ExtractFetchExpressions(s string) ([]string, error) {
- expr, err := Compile(s)
+ expr, err := Compile(s, CompileOptions{})
if err != nil {
return nil, err
}
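
Callers outside the engine now pass options explicitly; the zero value keeps the new default in which only quotes need escaping. A hedged usage sketch (the expressions are illustrative, not from this change):

```go
package main

import (
	"fmt"

	"github.com/m3db/m3/src/query/graphite/native"
)

func main() {
	// Default options: backslashes such as "\." and "\1" survive into
	// string arguments, which regex-taking functions like aliasSub rely on.
	expr, err := native.Compile(`aliasSub(foo.*, "foo\.(\w+)", "\1")`, native.CompileOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println(expr)

	// Opt-in behavior for graphite-web compatibility: every escape is
	// consumed except regex group references such as "\1".
	expr, err = native.Compile(`sumSeries(foo.bar.*, foo.baz.*)`, native.CompileOptions{
		EscapeAllNotOnlyQuotes: true,
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(expr)
}
```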
diff --git a/src/query/graphite/native/compiler_test.go b/src/query/graphite/native/compiler_test.go
index 589bdf1ee3..16edd9943a 100644
--- a/src/query/graphite/native/compiler_test.go
+++ b/src/query/graphite/native/compiler_test.go
@@ -276,7 +276,7 @@ func TestCompile1(t *testing.T) {
}
for _, test := range tests {
- expr, err := Compile(test.input)
+ expr, err := Compile(test.input, CompileOptions{})
require.Nil(t, err, "error compiling: expression='%s', error='%v'", test.input, err)
require.NotNil(t, expr)
assertExprTree(t, test.result, expr, fmt.Sprintf("invalid result for %s: %v vs %v",
@@ -375,7 +375,7 @@ func TestCompileErrors(t *testing.T) {
}
for _, test := range tests {
- expr, err := Compile(test.input)
+ expr, err := Compile(test.input, CompileOptions{})
require.NotNil(t, err, "no error for %s", test.input)
assert.Equal(t, test.err, err.Error(), "wrong error for %s", test.input)
assert.Nil(t, expr, "non-nil expression for %s", test.input)
diff --git a/src/query/graphite/native/engine.go b/src/query/graphite/native/engine.go
index 34a6efb643..dd9459cc5d 100644
--- a/src/query/graphite/native/engine.go
+++ b/src/query/graphite/native/engine.go
@@ -27,15 +27,17 @@ import (
// The Engine for running queries.
type Engine struct {
- storage storage.Storage
+ storage storage.Storage
+ compileOpts CompileOptions
}
// NewEngine creates a new query engine.
-func NewEngine(store storage.Storage) *Engine {
+func NewEngine(store storage.Storage, compileOpts CompileOptions) *Engine {
// TODO: take pooling details from config
// (https://github.com/m3db/m3/issues/2092)
return &Engine{
- storage: store,
+ storage: store,
+ compileOpts: compileOpts,
}
}
@@ -50,7 +52,7 @@ func (e *Engine) FetchByQuery(
// Compile compiles an expression from an expression string
func (e *Engine) Compile(s string) (Expression, error) {
- return Compile(s)
+ return Compile(s, e.compileOpts)
}
// Storage returns the engine's storage object
diff --git a/src/query/graphite/native/engine_test.go b/src/query/graphite/native/engine_test.go
index 55d2c8b5c2..ef6de01085 100644
--- a/src/query/graphite/native/engine_test.go
+++ b/src/query/graphite/native/engine_test.go
@@ -111,7 +111,7 @@ func TestExecute(t *testing.T) {
defer ctrl.Finish()
store := storage.NewMockStorage(ctrl)
- engine := NewEngine(store)
+ engine := NewEngine(store, CompileOptions{})
tests := []queryTest{
{"foo.bar.q.zed", true, []queryTestResult{{"foo.bar.q.zed", "foo.bar.q.zed", 0}}},
@@ -171,7 +171,7 @@ func TestTracing(t *testing.T) {
store := storage.NewMockStorage(ctrl)
- engine := NewEngine(store)
+ engine := NewEngine(store, CompileOptions{})
var traces []common.Trace
ctx := common.NewContext(common.ContextOptions{Start: time.Now().Add(-1 * time.Hour), End: time.Now(), Engine: engine})
@@ -229,7 +229,7 @@ func TestNilBinaryContextShifter(t *testing.T) {
store := storage.NewMockStorage(ctrl)
- engine := NewEngine(store)
+ engine := NewEngine(store, CompileOptions{})
ctx := common.NewContext(common.ContextOptions{Start: time.Now().Add(-1 * time.Hour), End: time.Now(), Engine: engine})
diff --git a/src/query/graphite/native/functions.go b/src/query/graphite/native/functions.go
index b0109922ac..d1b73e5141 100644
--- a/src/query/graphite/native/functions.go
+++ b/src/query/graphite/native/functions.go
@@ -61,6 +61,7 @@ const (
minSeriesFnName = "minSeries"
multiplyFnName = "multiply"
multiplySeriesFnName = "multiplySeries"
+ powSeriesFnName = "powSeries"
rangeFnName = "range"
rangeOfFnName = "rangeOf"
rangeOfSeriesFnName = "rangeOfSeries"
diff --git a/src/query/graphite/storage/converter.go b/src/query/graphite/storage/converter.go
index 45e91ec04b..df560ee47c 100644
--- a/src/query/graphite/storage/converter.go
+++ b/src/query/graphite/storage/converter.go
@@ -35,6 +35,19 @@ func convertMetricPartToMatcher(
) (models.Matcher, error) {
var matchType models.MatchType
if metric == wildcard {
+ if count == 0 {
+			// A MatchField matcher does not actually match all values
+			// for the first metric selector in practice.
+			// Need to special case this and just use a match-all
+			// regexp on this first value.
+			// This is OK since there is usually only a small number of
+			// distinct values in the first dot-separated component.
+ return models.Matcher{
+ Type: models.MatchRegexp,
+ Name: graphite.TagName(count),
+ Value: []byte(".*"),
+ }, nil
+ }
return models.Matcher{
Type: models.MatchField,
Name: graphite.TagName(count),
diff --git a/src/query/graphite/storage/converter_test.go b/src/query/graphite/storage/converter_test.go
index 80955134b6..88460d5fde 100644
--- a/src/query/graphite/storage/converter_test.go
+++ b/src/query/graphite/storage/converter_test.go
@@ -52,6 +52,13 @@ func TestConvertWildcardToMatcher(t *testing.T) {
Type: models.MatchField,
Name: graphite.TagName(i),
}
+ if i == 0 {
+ expected = models.Matcher{
+ Type: models.MatchRegexp,
+ Name: graphite.TagName(0),
+ Value: []byte(".*"),
+ }
+ }
actual, err := convertMetricPartToMatcher(i, metric)
require.NoError(t, err)
diff --git a/src/query/graphite/storage/m3_wrapper.go b/src/query/graphite/storage/m3_wrapper.go
index ec4845219c..7b773e7ba6 100644
--- a/src/query/graphite/storage/m3_wrapper.go
+++ b/src/query/graphite/storage/m3_wrapper.go
@@ -25,9 +25,11 @@ import (
"errors"
"fmt"
"math"
+ "strings"
"sync"
"time"
+ "github.com/m3db/m3/src/m3ninx/doc"
"github.com/m3db/m3/src/query/block"
xctx "github.com/m3db/m3/src/query/graphite/context"
"github.com/m3db/m3/src/query/graphite/graphite"
@@ -66,6 +68,7 @@ type M3WrappedStorageOptions struct {
RenderPartialStart bool
RenderPartialEnd bool
RenderSeriesAllNaNs bool
+ CompileEscapeAllNotOnlyQuotes bool
}
type seriesMetadata struct {
@@ -93,6 +96,31 @@ func NewM3WrappedStorage(
func TranslateQueryToMatchersWithTerminator(
query string,
) (models.Matchers, error) {
+ if strings.Contains(query, "**") {
+		// First add a matcher to ensure it's a graphite metric with a __g0__ tag.
+ hasFirstPathMatcher, err := convertMetricPartToMatcher(0, wildcard)
+ if err != nil {
+ return nil, err
+ }
+		// The regexp must run on the entire ID since ** can match across
+		// different graphite path components.
+ globOpts := graphite.GlobOptions{
+ AllowMatchAll: true,
+ }
+ idRegexp, _, err := graphite.ExtendedGlobToRegexPattern(query, globOpts)
+ if err != nil {
+ return nil, err
+ }
+ return models.Matchers{
+ hasFirstPathMatcher,
+ models.Matcher{
+ Type: models.MatchRegexp,
+ Name: doc.IDReservedFieldName,
+ Value: idRegexp,
+ },
+ }, nil
+ }
+
metricLength := graphite.CountMetricParts(query)
// Add space for a terminator character.
matchersLength := metricLength + 1
diff --git a/src/query/graphite/storage/m3_wrapper_test.go b/src/query/graphite/storage/m3_wrapper_test.go
index bb57ce9a1b..97d25c577a 100644
--- a/src/query/graphite/storage/m3_wrapper_test.go
+++ b/src/query/graphite/storage/m3_wrapper_test.go
@@ -26,6 +26,7 @@ import (
"testing"
"time"
+ "github.com/m3db/m3/src/m3ninx/doc"
"github.com/m3db/m3/src/query/block"
xctx "github.com/m3db/m3/src/query/graphite/context"
"github.com/m3db/m3/src/query/graphite/graphite"
@@ -81,6 +82,33 @@ func TestTranslateQuery(t *testing.T) {
assert.Equal(t, expected, matchers)
}
+func TestTranslateQueryStarStar(t *testing.T) {
+ query := `foo**bar`
+ end := time.Now()
+ start := end.Add(time.Hour * -2)
+ opts := FetchOptions{
+ StartTime: start,
+ EndTime: end,
+ DataOptions: DataOptions{
+ Timeout: time.Minute,
+ },
+ }
+
+ translated, err := translateQuery(query, opts, M3WrappedStorageOptions{})
+ assert.NoError(t, err)
+ assert.Equal(t, end, translated.End)
+ assert.Equal(t, start, translated.Start)
+ assert.Equal(t, time.Duration(0), translated.Interval)
+ assert.Equal(t, query, translated.Raw)
+ matchers := translated.TagMatchers
+ expected := models.Matchers{
+ {Type: models.MatchRegexp, Name: graphite.TagName(0), Value: []byte(".*")},
+ {Type: models.MatchRegexp, Name: doc.IDReservedFieldName, Value: []byte("foo.*bar")},
+ }
+
+ assert.Equal(t, expected, matchers)
+}
+
func TestTranslateQueryTrailingDot(t *testing.T) {
query := `foo.`
end := time.Now()
diff --git a/src/query/graphite/storage/series_metadata_map_new.go b/src/query/graphite/storage/series_metadata_map_new.go
index 6e8c0fefc1..ca10803ac6 100644
--- a/src/query/graphite/storage/series_metadata_map_new.go
+++ b/src/query/graphite/storage/series_metadata_map_new.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2020 Uber Technologies, Inc.
+// Copyright (c) 2021 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
diff --git a/src/query/graphite/ts/series.go b/src/query/graphite/ts/series.go
index be211c95ef..02d88c712a 100644
--- a/src/query/graphite/ts/series.go
+++ b/src/query/graphite/ts/series.go
@@ -599,6 +599,9 @@ func Max(a, b float64, count int) float64 { return math.Max(a, b) }
// Last finds the latter of two values.
func Last(a, b float64, count int) float64 { return b }
+// Pow returns the first value raised to the power of the second value.
+func Pow(a, b float64, count int) float64 { return math.Pow(a, b) }
+
// Median finds the median of a slice of values.
func Median(vals []float64, count int) float64 {
if count < 1 {
diff --git a/src/query/server/query.go b/src/query/server/query.go
index 2f82d7373f..21cd32561b 100644
--- a/src/query/server/query.go
+++ b/src/query/server/query.go
@@ -493,6 +493,7 @@ func Run(runOpts RunOptions) {
graphiteStorageOpts.RenderPartialStart = cfg.Carbon.RenderPartialStart
graphiteStorageOpts.RenderPartialEnd = cfg.Carbon.RenderPartialEnd
graphiteStorageOpts.RenderSeriesAllNaNs = cfg.Carbon.RenderSeriesAllNaNs
+ graphiteStorageOpts.CompileEscapeAllNotOnlyQuotes = cfg.Carbon.CompileEscapeAllNotOnlyQuotes
}
prometheusEngine, err := newPromQLEngine(cfg, prometheusEngineRegistry,
@@ -589,12 +590,10 @@ func Run(runOpts RunOptions) {
}
if cfg.Carbon != nil && cfg.Carbon.Ingester != nil {
- server, ok := startCarbonIngestion(cfg.Carbon, listenerOpts,
+ server := startCarbonIngestion(*cfg.Carbon.Ingester, listenerOpts,
instrumentOptions, logger, m3dbClusters, clusterNamespacesWatcher,
downsamplerAndWriter)
- if ok {
- defer server.Close()
- }
+ defer server.Close()
}
// Wait for process interrupt.
@@ -1094,15 +1093,14 @@ func startGRPCServer(
}
func startCarbonIngestion(
- cfg *config.CarbonConfiguration,
+ ingesterCfg config.CarbonIngesterConfiguration,
listenerOpts xnet.ListenerOptions,
iOpts instrument.Options,
logger *zap.Logger,
m3dbClusters m3.Clusters,
clusterNamespacesWatcher m3.ClusterNamespacesWatcher,
downsamplerAndWriter ingest.DownsamplerAndWriter,
-) (xserver.Server, bool) {
- ingesterCfg := cfg.Ingester
+) xserver.Server {
logger.Info("carbon ingestion enabled, configuring ingester")
// Setup worker pool.
@@ -1166,7 +1164,7 @@ func startCarbonIngestion(
logger.Info("started carbon ingestion server", zap.String("listenAddress", carbonListenAddress))
- return carbonServer, true
+ return carbonServer
}
func newDownsamplerAndWriter(
diff --git a/src/query/util/logging/log_test.go b/src/query/util/logging/log_test.go
index 7d0ccca73e..00fab9f63c 100644
--- a/src/query/util/logging/log_test.go
+++ b/src/query/util/logging/log_test.go
@@ -144,7 +144,7 @@ func TestPanicErrorResponder(t *testing.T) {
assert.Equal(t, 500, writer.status)
require.Equal(t, 1, len(writer.written))
- assert.Equal(t, "{\"error\":\"caught panic: beef\"}\n", writer.written[0])
+ assert.JSONEq(t, `{"status":"error","error":"caught panic: beef"}`, writer.written[0])
assertNoErrorLogs(t, stderr)
b, err := ioutil.ReadAll(stdout)
@@ -323,7 +323,7 @@ func TestWithResponseTimeAndPanicErrorLoggingFunc(t *testing.T) {
assert.Equal(t, 500, writer.status)
require.Equal(t, 1, len(writer.written))
- assert.Equal(t, "{\"error\":\"caught panic: err\"}\n", writer.written[0])
+ assert.JSONEq(t, `{"status":"error","error":"caught panic: err"}`, writer.written[0])
assertNoErrorLogs(t, stderr)
diff --git a/src/x/net/http/errors.go b/src/x/net/http/errors.go
index 331e23e4d3..5e10e54e5f 100644
--- a/src/x/net/http/errors.go
+++ b/src/x/net/http/errors.go
@@ -25,10 +25,19 @@ import (
"encoding/json"
"errors"
"net/http"
+ "sync"
xerrors "github.com/m3db/m3/src/x/errors"
)
+// ErrorRewriteFn is a function for rewriting a response error before it is written.
+type ErrorRewriteFn func(error) error
+
+var (
+ errorRewriteFn ErrorRewriteFn = func(err error) error { return err }
+ errorRewriteFnLock sync.RWMutex
+)
+
// Error is an HTTP JSON error that also sets a return status code.
type Error interface {
// Fulfill error interface.
@@ -68,7 +77,8 @@ func (e errorWithCode) Code() int {
// ErrorResponse is a generic response for an HTTP error.
type ErrorResponse struct {
- Error string `json:"error"`
+ Status string `json:"status"`
+ Error string `json:"error"`
}
type options struct {
@@ -92,17 +102,31 @@ func WriteError(w http.ResponseWriter, err error, opts ...WriteErrorOption) {
fn(&o)
}
+ errorRewriteFnLock.RLock()
+ err = errorRewriteFn(err)
+ errorRewriteFnLock.RUnlock()
+
statusCode := getStatusCode(err)
if o.response == nil {
w.Header().Set(HeaderContentType, ContentTypeJSON)
w.WriteHeader(statusCode)
- json.NewEncoder(w).Encode(ErrorResponse{Error: err.Error()})
+ json.NewEncoder(w).Encode(ErrorResponse{Status: "error", Error: err.Error()}) //nolint:errcheck
} else {
w.WriteHeader(statusCode)
w.Write(o.response)
}
}
+// SetErrorRewriteFn sets the error rewrite function and returns the previous one.
+func SetErrorRewriteFn(f ErrorRewriteFn) ErrorRewriteFn {
+ errorRewriteFnLock.Lock()
+ defer errorRewriteFnLock.Unlock()
+
+ res := errorRewriteFn
+ errorRewriteFn = f
+ return res
+}
+
func getStatusCode(err error) int {
switch v := err.(type) {
case Error:
@@ -117,7 +141,7 @@ func getStatusCode(err error) int {
return http.StatusInternalServerError
}
-// IsClientError returns true if this error would result in 4xx status code
+// IsClientError returns true if this error would result in 4xx status code.
func IsClientError(err error) bool {
code := getStatusCode(err)
return code >= 400 && code < 500
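
Since `SetErrorRewriteFn` returns the previously installed hook, callers (tests in particular) can swap a rewrite in and restore the old one afterwards, and the RWMutex makes the swap safe at runtime. A small usage sketch with a hypothetical rewrite policy:

```go
package example

import (
	"errors"

	xerrors "github.com/m3db/m3/src/x/errors"
	xhttp "github.com/m3db/m3/src/x/net/http"
)

// installRewrite hides parameter details from clients and returns a
// function that restores the previous hook.
func installRewrite() (restore func()) {
	prev := xhttp.SetErrorRewriteFn(func(err error) error {
		if xerrors.IsInvalidParams(err) {
			return errors.New("invalid request")
		}
		return err
	})
	return func() { xhttp.SetErrorRewriteFn(prev) }
}
```

In a test, `defer installRewrite()()` installs the rewrite and restores the previous hook on exit.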
diff --git a/src/x/net/http/errors_test.go b/src/x/net/http/errors_test.go
index 4bd5ecf46d..f5c0e6fabe 100644
--- a/src/x/net/http/errors_test.go
+++ b/src/x/net/http/errors_test.go
@@ -21,14 +21,55 @@
package xhttp
import (
+ "errors"
"fmt"
+ "net/http/httptest"
"testing"
+ "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
xerrors "github.com/m3db/m3/src/x/errors"
)
+func TestErrorRewrite(t *testing.T) {
+ tests := []struct {
+ name string
+ err error
+ expectedStatus int
+ expectedBody string
+ }{
+ {
+ name: "error that should not be rewritten",
+ err: errors.New("random error"),
+ expectedStatus: 500,
+ expectedBody: `{"status":"error","error":"random error"}`,
+ },
+ {
+ name: "error that should be rewritten",
+ err: xerrors.NewInvalidParamsError(errors.New("to be rewritten")),
+ expectedStatus: 500,
+ expectedBody: `{"status":"error","error":"rewritten error"}`,
+ },
+ }
+
+ SetErrorRewriteFn(func(err error) error {
+ if xerrors.IsInvalidParams(err) {
+ return errors.New("rewritten error")
+ }
+ return err
+ })
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ recorder := httptest.NewRecorder()
+ WriteError(recorder, tt.err)
+ assert.Equal(t, tt.expectedStatus, recorder.Code)
+ assert.JSONEq(t, tt.expectedBody, recorder.Body.String())
+ })
+ }
+}
+
func TestIsClientError(t *testing.T) {
tests := []struct {
err error
diff --git a/tools.go b/tools.go
index 66f754f1db..e59bb68871 100644
--- a/tools.go
+++ b/tools.go
@@ -6,6 +6,7 @@ import (
_ "github.com/fossas/fossa-cli/cmd/fossa"
_ "github.com/garethr/kubeval"
_ "github.com/golang/mock/mockgen"
+ _ "github.com/golangci/golangci-lint/cmd/golangci-lint"
_ "github.com/google/go-jsonnet/cmd/jsonnet"
_ "github.com/m3db/build-tools/utilities/genclean"
_ "github.com/m3db/tools/update-license"