From c3c4306c98c93646ddc0ff36d554486960bb0c33 Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Sat, 19 Jan 2019 16:36:05 -0500 Subject: [PATCH 01/70] dont use statsdex code and cleanup --- src/x/strings/strings.go | 41 +++++++++++++++++++++++ src/x/strings/strings_test.go | 63 +++++++++++++++++++++++++++++++++++ 2 files changed, 104 insertions(+) create mode 100644 src/x/strings/strings.go create mode 100644 src/x/strings/strings_test.go diff --git a/src/x/strings/strings.go b/src/x/strings/strings.go new file mode 100644 index 0000000000..18ebd31a10 --- /dev/null +++ b/src/x/strings/strings.go @@ -0,0 +1,41 @@ +// Copyright (c) 2019 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package strings + +import "bytes" + +// CondenseDuplicateChars condenses multiple duplicate chars in a row into one +func CondenseDuplicateChars(str string, char rune) string { + var buffer bytes.Buffer + prevChar := false + for _, c := range str { + if c == char { + if !prevChar { + buffer.WriteRune(c) + prevChar = true + } + } else { + buffer.WriteRune(c) + prevChar = false + } + } + return buffer.String() +} diff --git a/src/x/strings/strings_test.go b/src/x/strings/strings_test.go new file mode 100644 index 0000000000..7fb0ead25c --- /dev/null +++ b/src/x/strings/strings_test.go @@ -0,0 +1,63 @@ +// Copyright (c) 2019 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
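The helper above collapses consecutive repeats of a single rune into one occurrence and leaves every other character untouched. A minimal, hypothetical caller (not part of this diff; the import path is assumed from the file's location under src/x/strings, aliased to avoid clashing with the standard library):

package main

import (
	"fmt"

	xstrings "github.com/m3db/m3/src/x/strings" // assumed import path for the new package
)

func main() {
	// Runs of the condense rune collapse to a single occurrence.
	fmt.Println(xstrings.CondenseDuplicateChars("hello   world", ' ')) // "hello world"
	// Characters other than the condense rune pass through unchanged.
	fmt.Println(xstrings.CondenseDuplicateChars("a..b..c", '.')) // "a.b.c"
}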
+ +package strings + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestCondenseDuplicateChars(t *testing.T) { + testCases := []struct { + input string + expectedOutput string + condenseChar rune + }{ + { + input: "", + expectedOutput: "", + condenseChar: ' ', + }, + { + input: "hello world", + expectedOutput: "hello world", + condenseChar: ' ', + }, + { + input: "hello world", + expectedOutput: "hello world", + condenseChar: ' ', + }, + { + input: "hello world ", + expectedOutput: "hello world ", + condenseChar: ' ', + }, + } + + for _, tc := range testCases { + t.Run(tc.input, func(t *testing.T) { + actual := CondenseDuplicateChars(tc.input, tc.condenseChar) + require.Equal(t, tc.expectedOutput, actual) + }) + } +} From 2f59a0bf66662be1f90f01c8c4a4e78444b54c77 Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Mon, 21 Jan 2019 12:58:03 -0500 Subject: [PATCH 02/70] address feedback --- src/x/strings/strings.go | 41 ----------------------- src/x/strings/strings_test.go | 63 ----------------------------------- 2 files changed, 104 deletions(-) delete mode 100644 src/x/strings/strings.go delete mode 100644 src/x/strings/strings_test.go diff --git a/src/x/strings/strings.go b/src/x/strings/strings.go deleted file mode 100644 index 18ebd31a10..0000000000 --- a/src/x/strings/strings.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright (c) 2019 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package strings - -import "bytes" - -// CondenseDuplicateChars condenses multiple duplicate chars in a row into one -func CondenseDuplicateChars(str string, char rune) string { - var buffer bytes.Buffer - prevChar := false - for _, c := range str { - if c == char { - if !prevChar { - buffer.WriteRune(c) - prevChar = true - } - } else { - buffer.WriteRune(c) - prevChar = false - } - } - return buffer.String() -} diff --git a/src/x/strings/strings_test.go b/src/x/strings/strings_test.go deleted file mode 100644 index 7fb0ead25c..0000000000 --- a/src/x/strings/strings_test.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright (c) 2019 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package strings - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestCondenseDuplicateChars(t *testing.T) { - testCases := []struct { - input string - expectedOutput string - condenseChar rune - }{ - { - input: "", - expectedOutput: "", - condenseChar: ' ', - }, - { - input: "hello world", - expectedOutput: "hello world", - condenseChar: ' ', - }, - { - input: "hello world", - expectedOutput: "hello world", - condenseChar: ' ', - }, - { - input: "hello world ", - expectedOutput: "hello world ", - condenseChar: ' ', - }, - } - - for _, tc := range testCases { - t.Run(tc.input, func(t *testing.T) { - actual := CondenseDuplicateChars(tc.input, tc.condenseChar) - require.Equal(t, tc.expectedOutput, actual) - }) - } -} From d60d2fcc85d8500490ce8064c433c6b7557f1b47 Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Mon, 21 Jan 2019 13:42:16 -0500 Subject: [PATCH 03/70] wip --- .../m3coordinator/ingest/carbon/ingest.go | 117 ++++++++++++++++++ .../ingest/{ => m3msg}/config.go | 2 +- .../ingest/{ => m3msg}/ingest.go | 2 +- .../ingest/{ => m3msg}/ingest_test.go | 2 +- src/cmd/services/m3query/config/config.go | 4 +- 5 files changed, 122 insertions(+), 5 deletions(-) create mode 100644 src/cmd/services/m3coordinator/ingest/carbon/ingest.go rename src/cmd/services/m3coordinator/ingest/{ => m3msg}/config.go (99%) rename src/cmd/services/m3coordinator/ingest/{ => m3msg}/ingest.go (99%) rename src/cmd/services/m3coordinator/ingest/{ => m3msg}/ingest_test.go (99%) diff --git a/src/cmd/services/m3coordinator/ingest/carbon/ingest.go b/src/cmd/services/m3coordinator/ingest/carbon/ingest.go new file mode 100644 index 0000000000..e9d0f722e5 --- /dev/null +++ b/src/cmd/services/m3coordinator/ingest/carbon/ingest.go @@ -0,0 +1,117 @@ +// Copyright (c) 2019 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package ingestcarbon + +import ( + "net" + "sync" + "time" + + "code.uber.internal/infra/statsdex/protocols" + "code.uber.internal/infra/statsdex/services/m3dbingester/ingest" + "github.com/apex/log" + "github.com/m3db/m3/src/metrics/carbon" + "github.com/m3db/m3/src/metrics/policy" + "github.com/uber-go/tally" +) + +// Options configures the ingester. +type Options struct{} + +// NewIngester returns an ingester for carbon metrics. +func NewIngester( + ingester ingest.Ingester, + m tally.Scope, + opts Options, +) m3xserver.Handler { + return &carbonHandler{ + ingester: ingester, + metrics: newCarbonHandlerMetrics(m), + opts: opts, + } +} + +func (h *carbonHandler) Handle(conn net.Conn) { + var ( + wg sync.WaitGroup + beforeNetworkRead time.Time + ) + + callbackable := xserver.NewCallbackable(nil, func() { wg.Done() }) + s := carbon.NewScanner(conn) + for { + rolled := dice.Roll() + + // NB(cw) beforeNetworkRead must be read for every metrics now because it needs to be passed to + // writeFn for potential per-dc metric book keeping. + beforeNetworkRead = time.Now() + + if !s.Scan() { + break + } + + name, timestamp, value, recvd := s.MetricAndRecvTime() + + if rolled { + h.metrics.readTimeLatency.Record(time.Now().Sub(beforeNetworkRead)) + } + h.metrics.malformedCounter.Inc(int64(s.MalformedCount)) + s.MalformedCount = 0 + + var policy policy.StoragePolicy + if h.opts.EnablePolicyResolution { + var resolved bool + policy, resolved = h.opts.PolicyResolver.Resolve(name) + if !resolved { + if rolled { + log.Warnf("unable to resolve policy for id %s", name) + } + h.metrics.unresolvedIDs.Inc(1) + continue + } + } else { + policy = h.opts.StaticPolicy + } + + wg.Add(1) + h.ingester.Ingest(name, timestamp, recvd, value, policy, protocols.Carbon, callbackable) + } + + // Wait for all outstanding writes + wg.Wait() +} + +func (h *carbonHandler) Close() {} + +func newCarbonHandlerMetrics(m tally.Scope) carbonHandlerMetrics { + writesScope := m.SubScope("writes") + return carbonHandlerMetrics{ + unresolvedIDs: writesScope.Counter("ids-policy-unresolved"), + malformedCounter: writesScope.Counter("malformed"), + readTimeLatency: writesScope.Timer("read-time-latency"), + } +} + +type carbonHandlerMetrics struct { + unresolvedIDs tally.Counter + malformedCounter tally.Counter + readTimeLatency tally.Timer +} diff --git a/src/cmd/services/m3coordinator/ingest/config.go b/src/cmd/services/m3coordinator/ingest/m3msg/config.go similarity index 99% rename from src/cmd/services/m3coordinator/ingest/config.go rename to src/cmd/services/m3coordinator/ingest/m3msg/config.go index 715846ce92..3de2379432 100644 --- a/src/cmd/services/m3coordinator/ingest/config.go +++ b/src/cmd/services/m3coordinator/ingest/m3msg/config.go @@ -18,7 +18,7 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. 
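For context on what the carbon handler added earlier in this patch consumes: carbon.NewScanner reads Graphite's plaintext carbon protocol off the connection, one metric per line of the form <dot.delimited.name> <value> <unix timestamp>, for example:

foo.bar.baz 42.5 1548106565

Each successfully scanned line yields the name, timestamp, value and receive time that Handle passes to the ingester, and lines that fail to parse are tallied in the scanner's MalformedCount.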
-package ingest +package ingestm3msg import ( "github.com/m3db/m3/src/query/storage" diff --git a/src/cmd/services/m3coordinator/ingest/ingest.go b/src/cmd/services/m3coordinator/ingest/m3msg/ingest.go similarity index 99% rename from src/cmd/services/m3coordinator/ingest/ingest.go rename to src/cmd/services/m3coordinator/ingest/m3msg/ingest.go index 998d45215e..863e83138c 100644 --- a/src/cmd/services/m3coordinator/ingest/ingest.go +++ b/src/cmd/services/m3coordinator/ingest/m3msg/ingest.go @@ -18,7 +18,7 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. -package ingest +package ingestm3msg import ( "context" diff --git a/src/cmd/services/m3coordinator/ingest/ingest_test.go b/src/cmd/services/m3coordinator/ingest/m3msg/ingest_test.go similarity index 99% rename from src/cmd/services/m3coordinator/ingest/ingest_test.go rename to src/cmd/services/m3coordinator/ingest/m3msg/ingest_test.go index cb7b410c3b..ae1953820a 100644 --- a/src/cmd/services/m3coordinator/ingest/ingest_test.go +++ b/src/cmd/services/m3coordinator/ingest/m3msg/ingest_test.go @@ -18,7 +18,7 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. -package ingest +package ingestm3msg import ( "context" diff --git a/src/cmd/services/m3query/config/config.go b/src/cmd/services/m3query/config/config.go index 762e56ed70..4aeac23a64 100644 --- a/src/cmd/services/m3query/config/config.go +++ b/src/cmd/services/m3query/config/config.go @@ -25,7 +25,7 @@ import ( etcdclient "github.com/m3db/m3/src/cluster/client/etcd" "github.com/m3db/m3/src/cmd/services/m3coordinator/downsample" - "github.com/m3db/m3/src/cmd/services/m3coordinator/ingest" + "github.com/m3db/m3/src/cmd/services/m3coordinator/ingest/m3msg" "github.com/m3db/m3/src/cmd/services/m3coordinator/server/m3msg" "github.com/m3db/m3/src/query/models" "github.com/m3db/m3/src/query/storage/m3" @@ -127,7 +127,7 @@ type LimitsConfiguration struct { // IngestConfiguration is the configuration for ingestion server. type IngestConfiguration struct { // Ingester is the configuration for storage based ingester. - Ingester ingest.Configuration `yaml:"ingester"` + Ingester ingestm3msg.Configuration `yaml:"ingester"` // M3Msg is the configuration for m3msg server. 
M3Msg m3msg.Configuration `yaml:"m3msg"` From 54b6e01acc1ff97cb5ba4e894e016446424e6b81 Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Mon, 21 Jan 2019 16:34:19 -0500 Subject: [PATCH 04/70] basic skeleton and generation of tags from name --- .../m3coordinator/ingest/carbon/ingest.go | 152 ++++++++++++------ .../ingest/carbon/ingest_test.go | 48 ++++++ 2 files changed, 148 insertions(+), 52 deletions(-) create mode 100644 src/cmd/services/m3coordinator/ingest/carbon/ingest_test.go diff --git a/src/cmd/services/m3coordinator/ingest/carbon/ingest.go b/src/cmd/services/m3coordinator/ingest/carbon/ingest.go index e9d0f722e5..eea8f06a1e 100644 --- a/src/cmd/services/m3coordinator/ingest/carbon/ingest.go +++ b/src/cmd/services/m3coordinator/ingest/carbon/ingest.go @@ -21,85 +21,91 @@ package ingestcarbon import ( + "bytes" + "errors" + "fmt" "net" "sync" - "time" - "code.uber.internal/infra/statsdex/protocols" - "code.uber.internal/infra/statsdex/services/m3dbingester/ingest" - "github.com/apex/log" "github.com/m3db/m3/src/metrics/carbon" - "github.com/m3db/m3/src/metrics/policy" + "github.com/m3db/m3/src/query/models" + "github.com/m3db/m3/src/query/ts" + "github.com/m3db/m3x/instrument" + m3xserver "github.com/m3db/m3x/server" + xsync "github.com/m3db/m3x/sync" + xtime "github.com/m3db/m3x/time" + "github.com/uber-go/tally" ) +var ( + carbonSeparatorByte = byte('.') + carbonSeparatorBytes = []byte{carbonSeparatorByte} + + errCannotGenerateTagsFromEmptyName = errors.New("cannot generate tags from empty name") +) + +// StorageWriter is the interface that must be provided to the ingester so that it can +// write the ingested metrics. +type StorageWriter interface { + Write(tags models.Tags, dp ts.Datapoint, unit xtime.Unit) error +} + // Options configures the ingester. -type Options struct{} +type Options struct { + InstrumentOptions instrument.Options + WorkerPool xsync.PooledWorkerPool +} // NewIngester returns an ingester for carbon metrics. func NewIngester( - ingester ingest.Ingester, - m tally.Scope, + storage StorageWriter, opts Options, ) m3xserver.Handler { - return &carbonHandler{ - ingester: ingester, - metrics: newCarbonHandlerMetrics(m), - opts: opts, + return &ingester{ + storage: storage, + opts: opts, } } -func (h *carbonHandler) Handle(conn net.Conn) { - var ( - wg sync.WaitGroup - beforeNetworkRead time.Time - ) - - callbackable := xserver.NewCallbackable(nil, func() { wg.Done() }) - s := carbon.NewScanner(conn) - for { - rolled := dice.Roll() - - // NB(cw) beforeNetworkRead must be read for every metrics now because it needs to be passed to - // writeFn for potential per-dc metric book keeping. 
- beforeNetworkRead = time.Now() - - if !s.Scan() { - break - } +type ingester struct { + storage StorageWriter + opts Options + conn net.Conn +} - name, timestamp, value, recvd := s.MetricAndRecvTime() +func (i *ingester) Handle(conn net.Conn) { + if i.conn != nil { + // TODO: Something + } + i.conn = conn - if rolled { - h.metrics.readTimeLatency.Record(time.Now().Sub(beforeNetworkRead)) - } - h.metrics.malformedCounter.Inc(int64(s.MalformedCount)) - s.MalformedCount = 0 + var ( + wg = sync.WaitGroup{} + s = carbon.NewScanner(conn) + ) - var policy policy.StoragePolicy - if h.opts.EnablePolicyResolution { - var resolved bool - policy, resolved = h.opts.PolicyResolver.Resolve(name) - if !resolved { - if rolled { - log.Warnf("unable to resolve policy for id %s", name) - } - h.metrics.unresolvedIDs.Inc(1) - continue - } - } else { - policy = h.opts.StaticPolicy - } + for s.Scan() { + _, timestamp, value := s.Metric() wg.Add(1) - h.ingester.Ingest(name, timestamp, recvd, value, policy, protocols.Carbon, callbackable) + i.opts.WorkerPool.Go(func() { + dp := ts.Datapoint{Timestamp: timestamp, Value: value} + i.storage.Write(models.Tags{}, dp, xtime.Second) + wg.Done() + }) + // i.metrics.malformedCounter.Inc(int64(s.MalformedCount)) + s.MalformedCount = 0 } // Wait for all outstanding writes wg.Wait() } -func (h *carbonHandler) Close() {} +func (i *ingester) Close() { + // TODO: Log error + i.conn.Close() +} func newCarbonHandlerMetrics(m tally.Scope) carbonHandlerMetrics { writesScope := m.SubScope("writes") @@ -115,3 +121,45 @@ type carbonHandlerMetrics struct { malformedCounter tally.Counter readTimeLatency tally.Timer } + +func generateTagsFromName(name []byte) (models.Tags, error) { + if len(name) == 0 { + return models.Tags{}, errCannotGenerateTagsFromEmptyName + } + var ( + numTags = bytes.Count(name, carbonSeparatorBytes) + 1 + tagsSlice = make([]models.Tag, 0, numTags) + tags = models.Tags{Tags: tagsSlice} + ) + + startIdx := 0 + tagNum := 0 + for i, charByte := range name { + if charByte == carbonSeparatorByte { + tags.AddTag(models.Tag{ + // TODO: Fix me + Name: []byte(fmt.Sprintf("__$%d__", tagNum)), + Value: name[startIdx:i], + }) + } + } + + // Write out the final tag since the for loop above will miss anything + // after the final separator. Note, that we make sure that the final + // character in the name is not the separator because in that case there + // would be no additional tag to add. I.E if the input was: + // foo.bar.baz + // then the for loop would append foo and bar, but we would still need to + // append baz, however, if the input was: + // foo.bar.baz. + // then the foor loop would have appended foo, bar, and baz already. + if name[len(name)-1] != carbonSeparatorByte { + // TODO: Fix me + tags = tags.AddTag(models.Tag{ + Name: []byte(fmt.Sprintf("__$%d__", len(tags.Tags))), + Value: name[startIdx:], + }) + } + + return tags, nil +} diff --git a/src/cmd/services/m3coordinator/ingest/carbon/ingest_test.go b/src/cmd/services/m3coordinator/ingest/carbon/ingest_test.go new file mode 100644 index 0000000000..6b98c10d89 --- /dev/null +++ b/src/cmd/services/m3coordinator/ingest/carbon/ingest_test.go @@ -0,0 +1,48 @@ +// Copyright (c) 2019 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package ingestcarbon + +import ( + "testing" + + "github.com/m3db/m3/src/query/models" + "github.com/stretchr/testify/require" +) + +func TestGenerateTagsFromName(t *testing.T) { + testCases := []struct { + name string + expectedTags []models.Tag + }{ + { + name: "foo", + expectedTags: []models.Tag{ + {Name: []byte("__$0__"), Value: []byte("foo")}, + }, + }, + } + + for _, tc := range testCases { + tags, err := generateTagsFromName([]byte(tc.name)) + require.NoError(t, err) + require.Equal(t, tc.expectedTags, tags.Tags) + } +} From 9a156ca53e8402f69cce630deaac392fbbd8ce2a Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Mon, 21 Jan 2019 16:40:41 -0500 Subject: [PATCH 05/70] wip --- .../m3coordinator/ingest/carbon/ingest.go | 17 +++++++------ .../ingest/carbon/ingest_test.go | 24 +++++++++++++++++++ 2 files changed, 34 insertions(+), 7 deletions(-) diff --git a/src/cmd/services/m3coordinator/ingest/carbon/ingest.go b/src/cmd/services/m3coordinator/ingest/carbon/ingest.go index eea8f06a1e..945ffec110 100644 --- a/src/cmd/services/m3coordinator/ingest/carbon/ingest.go +++ b/src/cmd/services/m3coordinator/ingest/carbon/ingest.go @@ -127,20 +127,22 @@ func generateTagsFromName(name []byte) (models.Tags, error) { return models.Tags{}, errCannotGenerateTagsFromEmptyName } var ( - numTags = bytes.Count(name, carbonSeparatorBytes) + 1 - tagsSlice = make([]models.Tag, 0, numTags) - tags = models.Tags{Tags: tagsSlice} + numTags = bytes.Count(name, carbonSeparatorBytes) + 1 + tags = make([]models.Tag, 0, numTags) ) startIdx := 0 tagNum := 0 for i, charByte := range name { if charByte == carbonSeparatorByte { - tags.AddTag(models.Tag{ + fmt.Println("appending: ", string(name[startIdx:i])) + tags = append(tags, models.Tag{ // TODO: Fix me Name: []byte(fmt.Sprintf("__$%d__", tagNum)), Value: name[startIdx:i], }) + startIdx = i + 1 + tagNum++ } } @@ -155,11 +157,12 @@ func generateTagsFromName(name []byte) (models.Tags, error) { // then the foor loop would have appended foo, bar, and baz already. 
if name[len(name)-1] != carbonSeparatorByte { // TODO: Fix me - tags = tags.AddTag(models.Tag{ - Name: []byte(fmt.Sprintf("__$%d__", len(tags.Tags))), + fmt.Println("appending: ", string(name[startIdx:])) + tags = append(tags, models.Tag{ + Name: []byte(fmt.Sprintf("__$%d__", len(tags))), Value: name[startIdx:], }) } - return tags, nil + return models.Tags{Tags: tags}, nil } diff --git a/src/cmd/services/m3coordinator/ingest/carbon/ingest_test.go b/src/cmd/services/m3coordinator/ingest/carbon/ingest_test.go index 6b98c10d89..2a37760455 100644 --- a/src/cmd/services/m3coordinator/ingest/carbon/ingest_test.go +++ b/src/cmd/services/m3coordinator/ingest/carbon/ingest_test.go @@ -38,6 +38,30 @@ func TestGenerateTagsFromName(t *testing.T) { {Name: []byte("__$0__"), Value: []byte("foo")}, }, }, + { + name: "foo.bar.baz", + expectedTags: []models.Tag{ + {Name: []byte("__$0__"), Value: []byte("foo")}, + {Name: []byte("__$1__"), Value: []byte("bar")}, + {Name: []byte("__$2__"), Value: []byte("baz")}, + }, + }, + { + name: "foo.bar.baz.", + expectedTags: []models.Tag{ + {Name: []byte("__$0__"), Value: []byte("foo")}, + {Name: []byte("__$1__"), Value: []byte("bar")}, + {Name: []byte("__$2__"), Value: []byte("baz")}, + }, + }, + { + name: "foo..bar..baz..", + expectedTags: []models.Tag{ + {Name: []byte("__$0__"), Value: []byte("foo")}, + {Name: []byte("__$1__"), Value: []byte("bar")}, + {Name: []byte("__$2__"), Value: []byte("baz")}, + }, + }, } for _, tc := range testCases { From 7dabba8dd67215a91fd89b6752867c0b56e59255 Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Mon, 21 Jan 2019 16:55:30 -0500 Subject: [PATCH 06/70] Add benchmark for generateTagsFromName and reuse names as much as possible --- .../m3coordinator/ingest/carbon/ingest.go | 34 ++++++++++++--- .../ingest/carbon/ingest_benchmark_test.go | 43 +++++++++++++++++++ .../ingest/carbon/ingest_test.go | 20 ++++++--- 3 files changed, 84 insertions(+), 13 deletions(-) create mode 100644 src/cmd/services/m3coordinator/ingest/carbon/ingest_benchmark_test.go diff --git a/src/cmd/services/m3coordinator/ingest/carbon/ingest.go b/src/cmd/services/m3coordinator/ingest/carbon/ingest.go index 945ffec110..5651dd74b1 100644 --- a/src/cmd/services/m3coordinator/ingest/carbon/ingest.go +++ b/src/cmd/services/m3coordinator/ingest/carbon/ingest.go @@ -42,6 +42,9 @@ var ( carbonSeparatorByte = byte('.') carbonSeparatorBytes = []byte{carbonSeparatorByte} + numPreFormattedKeyNames = 100 + preFormattedKeyNames = [][]byte{} + errCannotGenerateTagsFromEmptyName = errors.New("cannot generate tags from empty name") ) @@ -135,10 +138,12 @@ func generateTagsFromName(name []byte) (models.Tags, error) { tagNum := 0 for i, charByte := range name { if charByte == carbonSeparatorByte { - fmt.Println("appending: ", string(name[startIdx:i])) + if i+1 < len(name) && name[i+1] == carbonSeparatorByte { + return models.Tags{}, fmt.Errorf("carbon metric: %s has duplicate separator", string(name)) + } + tags = append(tags, models.Tag{ - // TODO: Fix me - Name: []byte(fmt.Sprintf("__$%d__", tagNum)), + Name: getOrGenerateKeyName(tagNum), Value: name[startIdx:i], }) startIdx = i + 1 @@ -156,13 +161,30 @@ func generateTagsFromName(name []byte) (models.Tags, error) { // foo.bar.baz. // then the foor loop would have appended foo, bar, and baz already. 
if name[len(name)-1] != carbonSeparatorByte { - // TODO: Fix me - fmt.Println("appending: ", string(name[startIdx:])) tags = append(tags, models.Tag{ - Name: []byte(fmt.Sprintf("__$%d__", len(tags))), + Name: getOrGenerateKeyName(tagNum), Value: name[startIdx:], }) } return models.Tags{Tags: tags}, nil } + +func getOrGenerateKeyName(idx int) []byte { + if idx < len(preFormattedKeyNames) { + return preFormattedKeyNames[idx] + } + + return []byte(fmt.Sprintf("__$%d__", idx)) +} + +func generateKeyName(idx int) []byte { + return []byte(fmt.Sprintf("__$%d__", idx)) +} + +func init() { + for i := 0; i < numPreFormattedKeyNames; i++ { + keyName := generateKeyName(i) + preFormattedKeyNames = append(preFormattedKeyNames, keyName) + } +} diff --git a/src/cmd/services/m3coordinator/ingest/carbon/ingest_benchmark_test.go b/src/cmd/services/m3coordinator/ingest/carbon/ingest_benchmark_test.go new file mode 100644 index 0000000000..2d48c87879 --- /dev/null +++ b/src/cmd/services/m3coordinator/ingest/carbon/ingest_benchmark_test.go @@ -0,0 +1,43 @@ +// Copyright (c) 2019 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package ingestcarbon + +import ( + "testing" + + "github.com/m3db/m3/src/query/models" +) + +var benchmarkGenerateTagsSink models.Tags + +func BenchmarkGenerateTagsFromName(b *testing.B) { + var ( + testName = []byte("foo.bar.baz.bax") + err error + ) + + for i := 0; i < b.N; i++ { + benchmarkGenerateTagsSink, err = generateTagsFromName(testName) + if err != nil { + panic(err) + } + } +} diff --git a/src/cmd/services/m3coordinator/ingest/carbon/ingest_test.go b/src/cmd/services/m3coordinator/ingest/carbon/ingest_test.go index 2a37760455..e33ec07372 100644 --- a/src/cmd/services/m3coordinator/ingest/carbon/ingest_test.go +++ b/src/cmd/services/m3coordinator/ingest/carbon/ingest_test.go @@ -21,6 +21,7 @@ package ingestcarbon import ( + "fmt" "testing" "github.com/m3db/m3/src/query/models" @@ -31,6 +32,7 @@ func TestGenerateTagsFromName(t *testing.T) { testCases := []struct { name string expectedTags []models.Tag + expectedErr error }{ { name: "foo", @@ -55,18 +57,22 @@ func TestGenerateTagsFromName(t *testing.T) { }, }, { - name: "foo..bar..baz..", - expectedTags: []models.Tag{ - {Name: []byte("__$0__"), Value: []byte("foo")}, - {Name: []byte("__$1__"), Value: []byte("bar")}, - {Name: []byte("__$2__"), Value: []byte("baz")}, - }, + name: "foo..bar..baz..", + expectedErr: fmt.Errorf("carbon metric: foo..bar..baz.. 
has duplicate separator"), + }, + { + name: "foo.bar.baz..", + expectedErr: fmt.Errorf("carbon metric: foo.bar.baz.. has duplicate separator"), }, } for _, tc := range testCases { tags, err := generateTagsFromName([]byte(tc.name)) - require.NoError(t, err) + if tc.expectedErr != nil { + require.Equal(t, tc.expectedErr, err) + } else { + require.NoError(t, err) + } require.Equal(t, tc.expectedTags, tags.Tags) } } From 9a08c76dd9e0e448254299df3ec328e65dba7590 Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Mon, 21 Jan 2019 16:56:24 -0500 Subject: [PATCH 07/70] add comments --- src/cmd/services/m3coordinator/ingest/carbon/ingest.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/cmd/services/m3coordinator/ingest/carbon/ingest.go b/src/cmd/services/m3coordinator/ingest/carbon/ingest.go index 5651dd74b1..bf0f60943d 100644 --- a/src/cmd/services/m3coordinator/ingest/carbon/ingest.go +++ b/src/cmd/services/m3coordinator/ingest/carbon/ingest.go @@ -39,11 +39,14 @@ import ( ) var ( + // Used for parsing carbon names into tags. carbonSeparatorByte = byte('.') carbonSeparatorBytes = []byte{carbonSeparatorByte} + // Number of pre-formatted key names to generate in the init() function. numPreFormattedKeyNames = 100 - preFormattedKeyNames = [][]byte{} + // Should never be modified after init(). + preFormattedKeyNames = [][]byte{} errCannotGenerateTagsFromEmptyName = errors.New("cannot generate tags from empty name") ) From 689cd957899bdc3527781722c9e48ef13a66790b Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Mon, 21 Jan 2019 16:59:23 -0500 Subject: [PATCH 08/70] add space --- src/cmd/services/m3coordinator/ingest/carbon/ingest.go | 1 + 1 file changed, 1 insertion(+) diff --git a/src/cmd/services/m3coordinator/ingest/carbon/ingest.go b/src/cmd/services/m3coordinator/ingest/carbon/ingest.go index bf0f60943d..7948d65f1a 100644 --- a/src/cmd/services/m3coordinator/ingest/carbon/ingest.go +++ b/src/cmd/services/m3coordinator/ingest/carbon/ingest.go @@ -132,6 +132,7 @@ func generateTagsFromName(name []byte) (models.Tags, error) { if len(name) == 0 { return models.Tags{}, errCannotGenerateTagsFromEmptyName } + var ( numTags = bytes.Count(name, carbonSeparatorBytes) + 1 tags = make([]models.Tag, 0, numTags) From cf50185acf8dc59dbea13e2921a2544d1af5c50c Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Tue, 22 Jan 2019 14:42:03 -0500 Subject: [PATCH 09/70] Add downsampleandwrite + tests --- .../services/m3coordinator/ingest/write.go | 184 ++++++++++++++ .../m3coordinator/ingest/write_test.go | 228 ++++++++++++++++++ 2 files changed, 412 insertions(+) create mode 100644 src/cmd/services/m3coordinator/ingest/write.go create mode 100644 src/cmd/services/m3coordinator/ingest/write_test.go diff --git a/src/cmd/services/m3coordinator/ingest/write.go b/src/cmd/services/m3coordinator/ingest/write.go new file mode 100644 index 0000000000..4efc6b8ab2 --- /dev/null +++ b/src/cmd/services/m3coordinator/ingest/write.go @@ -0,0 +1,184 @@ +// Copyright (c) 2019 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package write + +import ( + "context" + "sync" + + "github.com/m3db/m3/src/cmd/services/m3coordinator/downsample" + "github.com/m3db/m3/src/query/models" + "github.com/m3db/m3/src/query/storage" + "github.com/m3db/m3/src/query/ts" + xerrors "github.com/m3db/m3x/errors" + xtime "github.com/m3db/m3x/time" +) + +// DownsampleAndWriteIter is an interface that can be implemented to use +// the WriteBatch method. +type DownsampleAndWriteIter interface { + Next() bool + Current() (models.Tags, ts.Datapoints, xtime.Unit) + Reset() error + Error() error +} + +// DownsamplerAndWriter is the interface for the downsamplerAndWriter which +// writes metrics to the downsampler as well as to storage in unaggregated form. +type DownsamplerAndWriter interface { + Write( + ctx context.Context, + tags models.Tags, + datapoints ts.Datapoints, + unit xtime.Unit, + ) error + + WriteBatch( + ctx context.Context, + iter DownsampleAndWriteIter, + ) error +} + +// downsamplerAndWriter encapsulates the logic for writing data to the downsampler, +// as well as in unaggregated form to storage. +type downsamplerAndWriter struct { + store storage.Storage + downsampler downsample.Downsampler +} + +// NewDownsamplerAndWriter creates a new downsampler and writer. +func NewDownsamplerAndWriter( + store storage.Storage, + downsampler downsample.Downsampler, +) DownsamplerAndWriter { + return &downsamplerAndWriter{ + store: store, + downsampler: downsampler, + } +} + +func (d *downsamplerAndWriter) Write( + ctx context.Context, + tags models.Tags, + datapoints ts.Datapoints, + unit xtime.Unit, +) error { + appender, err := d.downsampler.NewMetricsAppender() + if err != nil { + return err + } + + for _, tag := range tags.Tags { + appender.AddTag(tag.Name, tag.Value) + } + + samplesAppender, err := appender.SamplesAppender() + if err != nil { + return err + } + + for _, dp := range datapoints { + err := samplesAppender.AppendGaugeSample(dp.Value) + if err != nil { + return err + } + } + + appender.Finalize() + + return d.store.Write(ctx, &storage.WriteQuery{ + Tags: tags, + Datapoints: datapoints, + Unit: unit, + Attributes: storage.Attributes{ + MetricsType: storage.UnaggregatedMetricsType, + }, + }) +} + +func (d *downsamplerAndWriter) WriteBatch( + ctx context.Context, + iter DownsampleAndWriteIter, +) error { + // Write aggregated. 
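+	// Note: each series from the iterator resets the shared appender, re-adds
+	// its own tags, and appends every datapoint as a gauge sample; the appender
+	// is finalized once after the loop, and the iterator is then Reset() so the
+	// same series can be written unaggregated below.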
+ appender, err := d.downsampler.NewMetricsAppender() + if err != nil { + return err + } + + for iter.Next() { + appender.Reset() + tags, datapoints, _ := iter.Current() + for _, tag := range tags.Tags { + appender.AddTag(tag.Name, tag.Value) + } + + samplesAppender, err := appender.SamplesAppender() + if err != nil { + return err + } + + for _, dp := range datapoints { + err := samplesAppender.AppendGaugeSample(dp.Value) + if err != nil { + return err + } + } + } + appender.Finalize() + + if err := iter.Error(); err != nil { + return err + } + if err := iter.Reset(); err != nil { + return err + } + + // Write unaggregated. + var ( + wg = &sync.WaitGroup{} + errLock sync.Mutex + multiErr xerrors.MultiError + ) + for iter.Next() { + wg.Add(1) + tags, datapoints, unit := iter.Current() + go func() { + err := d.store.Write(ctx, &storage.WriteQuery{ + Tags: tags, + Datapoints: datapoints, + Unit: unit, + Attributes: storage.Attributes{ + MetricsType: storage.UnaggregatedMetricsType, + }, + }) + if err != nil { + errLock.Lock() + multiErr = multiErr.Add(err) + errLock.Unlock() + } + wg.Done() + }() + } + + wg.Wait() + return multiErr.LastError() +} diff --git a/src/cmd/services/m3coordinator/ingest/write_test.go b/src/cmd/services/m3coordinator/ingest/write_test.go new file mode 100644 index 0000000000..f76893c368 --- /dev/null +++ b/src/cmd/services/m3coordinator/ingest/write_test.go @@ -0,0 +1,228 @@ +// Copyright (c) 2019 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
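Before the tests that follow, a rough sketch of how a caller is expected to wire up the new type; the import alias and the helper name writeOne are assumptions rather than part of this diff (a later commit in this series renames the package from write to ingest, which is the alias used here):

package example

import (
	"context"
	"time"

	"github.com/m3db/m3/src/cmd/services/m3coordinator/downsample"
	ingest "github.com/m3db/m3/src/cmd/services/m3coordinator/ingest"
	"github.com/m3db/m3/src/query/models"
	"github.com/m3db/m3/src/query/storage"
	"github.com/m3db/m3/src/query/ts"
	xtime "github.com/m3db/m3x/time"
)

// writeOne pushes a single tagged series through the downsampler and, in
// unaggregated form, through storage.
func writeOne(ctx context.Context, store storage.Storage, ds downsample.Downsampler) error {
	dw := ingest.NewDownsamplerAndWriter(store, ds)
	tags := models.Tags{Tags: []models.Tag{
		{Name: []byte("service"), Value: []byte("example")},
	}}
	dps := ts.Datapoints{{Timestamp: time.Now(), Value: 42}}
	return dw.Write(ctx, tags, dps, xtime.Second)
}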
+ +package write + +import ( + "context" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + + "github.com/m3db/m3/src/cmd/services/m3coordinator/downsample" + "github.com/m3db/m3/src/dbnode/client" + "github.com/m3db/m3/src/query/models" + "github.com/m3db/m3/src/query/test/m3" + "github.com/m3db/m3/src/query/ts" + xtime "github.com/m3db/m3x/time" +) + +var ( + testTags1 = models.Tags{Tags: []models.Tag{ + { + Name: []byte("test_1_key_1"), + Value: []byte("test_1_value_1"), + }, + { + Name: []byte("test_1_key_2"), + Value: []byte("test_1_value_2"), + }, + { + Name: []byte("test_1_key_3"), + Value: []byte("test_1_value_3"), + }, + }} + testTags2 = models.Tags{Tags: []models.Tag{ + { + Name: []byte("test_2_key_1"), + Value: []byte("test_2_value_1"), + }, + { + Name: []byte("test_2_key_2"), + Value: []byte("test_2_value_2"), + }, + { + Name: []byte("test_2_key_3"), + Value: []byte("test_2_value_3"), + }, + }} + + testDatapoints1 = []ts.Datapoint{ + { + Timestamp: time.Unix(0, 0), + Value: 0, + }, + { + Timestamp: time.Unix(0, 1), + Value: 1, + }, + { + Timestamp: time.Unix(0, 2), + Value: 2, + }, + } + testDatapoints2 = []ts.Datapoint{ + { + Timestamp: time.Unix(0, 3), + Value: 3, + }, + { + Timestamp: time.Unix(0, 4), + Value: 4, + }, + { + Timestamp: time.Unix(0, 5), + Value: 5, + }, + } + + testEntries = []testIterEntry{ + {tags: testTags1, datapoints: testDatapoints1}, + {tags: testTags2, datapoints: testDatapoints2}, + } +) + +type testIter struct { + idx int + entries []testIterEntry +} + +type testIterEntry struct { + tags models.Tags + datapoints []ts.Datapoint +} + +func newTestIter(entries []testIterEntry) *testIter { + return &testIter{ + idx: -1, + entries: entries, + } +} + +func (i *testIter) Next() bool { + i.idx++ + return i.idx < len(i.entries) +} + +func (i *testIter) Current() (models.Tags, ts.Datapoints, xtime.Unit) { + if len(i.entries) == 0 || i.idx < 0 || i.idx >= len(i.entries) { + return models.Tags{}, nil, 0 + } + + curr := i.entries[i.idx] + return curr.tags, curr.datapoints, xtime.Second +} + +func (i *testIter) Reset() error { + i.idx = -1 + return nil +} + +func (i *testIter) Error() error { + return nil +} + +func TestDownsampleAndWrite(t *testing.T) { + ctrl := gomock.NewController(t) + downAndWrite, downsampler, session := newTestDownsamplerAndWriter(t, ctrl) + + var ( + mockSamplesAppender = downsample.NewMockSamplesAppender(ctrl) + mockMetricsAppender = downsample.NewMockMetricsAppender(ctrl) + ) + + mockMetricsAppender. + EXPECT(). + SamplesAppender(). 
+ Return(mockSamplesAppender, nil) + for _, tag := range testTags1.Tags { + mockMetricsAppender.EXPECT().AddTag(tag.Name, tag.Value) + } + + for _, dp := range testDatapoints1 { + mockSamplesAppender.EXPECT().AppendGaugeSample(dp.Value) + } + downsampler.EXPECT().NewMetricsAppender().Return(mockMetricsAppender, nil) + + mockMetricsAppender.EXPECT().Finalize() + + for _, dp := range testDatapoints1 { + session.EXPECT().WriteTagged( + gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), dp.Value, gomock.Any(), gomock.Any()) + } + + err := downAndWrite.Write(context.Background(), testTags1, testDatapoints1, xtime.Second) + require.NoError(t, err) +} + +func TestDownsampleAndWriteBatch(t *testing.T) { + ctrl := gomock.NewController(t) + downAndWrite, downsampler, session := newTestDownsamplerAndWriter(t, ctrl) + + var ( + mockSamplesAppender = downsample.NewMockSamplesAppender(ctrl) + mockMetricsAppender = downsample.NewMockMetricsAppender(ctrl) + ) + + mockMetricsAppender. + EXPECT(). + SamplesAppender(). + Return(mockSamplesAppender, nil).Times(2) + for _, tag := range testTags1.Tags { + mockMetricsAppender.EXPECT().AddTag(tag.Name, tag.Value) + } + for _, dp := range testDatapoints1 { + mockSamplesAppender.EXPECT().AppendGaugeSample(dp.Value) + } + for _, tag := range testTags2.Tags { + mockMetricsAppender.EXPECT().AddTag(tag.Name, tag.Value) + } + for _, dp := range testDatapoints2 { + mockSamplesAppender.EXPECT().AppendGaugeSample(dp.Value) + } + downsampler.EXPECT().NewMetricsAppender().Return(mockMetricsAppender, nil) + + mockMetricsAppender.EXPECT().Reset().Times(2) + mockMetricsAppender.EXPECT().Finalize() + + for _, dp := range testDatapoints1 { + session.EXPECT().WriteTagged( + gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), dp.Value, gomock.Any(), gomock.Any()) + } + for _, dp := range testDatapoints2 { + session.EXPECT().WriteTagged( + gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), dp.Value, gomock.Any(), gomock.Any()) + } + + iter := newTestIter(testEntries) + err := downAndWrite.WriteBatch(context.Background(), iter) + require.NoError(t, err) +} + +func newTestDownsamplerAndWriter( + t *testing.T, + ctrl *gomock.Controller, +) (*downsamplerAndWriter, *downsample.MockDownsampler, *client.MockSession) { + storage, session := m3.NewStorageAndSession(t, ctrl) + downsampler := downsample.NewMockDownsampler(ctrl) + return NewDownsamplerAndWriter(storage, downsampler).(*downsamplerAndWriter), downsampler, session +} From 10dc79050b0dd8ee1f976bd811572178a22daa58 Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Tue, 22 Jan 2019 15:05:58 -0500 Subject: [PATCH 10/70] refactor remote write to use new downsamplerAndWriter --- .../services/m3coordinator/ingest/write.go | 2 +- .../m3coordinator/ingest/write_mock.go | 87 ++++++++++ .../m3coordinator/ingest/write_test.go | 2 +- .../api/v1/handler/prometheus/remote/write.go | 148 +++++------------- .../handler/prometheus/remote/write_test.go | 23 +-- src/query/generated/mocks/generate.go | 1 + 6 files changed, 145 insertions(+), 118 deletions(-) create mode 100644 src/cmd/services/m3coordinator/ingest/write_mock.go diff --git a/src/cmd/services/m3coordinator/ingest/write.go b/src/cmd/services/m3coordinator/ingest/write.go index 4efc6b8ab2..71db890bc0 100644 --- a/src/cmd/services/m3coordinator/ingest/write.go +++ b/src/cmd/services/m3coordinator/ingest/write.go @@ -18,7 +18,7 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. 
-package write +package ingest import ( "context" diff --git a/src/cmd/services/m3coordinator/ingest/write_mock.go b/src/cmd/services/m3coordinator/ingest/write_mock.go new file mode 100644 index 0000000000..d1f9577a5b --- /dev/null +++ b/src/cmd/services/m3coordinator/ingest/write_mock.go @@ -0,0 +1,87 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/m3db/m3/src/cmd/services/m3coordinator/ingest (interfaces: DownsamplerAndWriter) + +// Copyright (c) 2019 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package ingest is a generated GoMock package. +package ingest + +import ( + "context" + "reflect" + + "github.com/m3db/m3/src/query/models" + "github.com/m3db/m3/src/query/ts" + "github.com/m3db/m3x/time" + + "github.com/golang/mock/gomock" +) + +// MockDownsamplerAndWriter is a mock of DownsamplerAndWriter interface +type MockDownsamplerAndWriter struct { + ctrl *gomock.Controller + recorder *MockDownsamplerAndWriterMockRecorder +} + +// MockDownsamplerAndWriterMockRecorder is the mock recorder for MockDownsamplerAndWriter +type MockDownsamplerAndWriterMockRecorder struct { + mock *MockDownsamplerAndWriter +} + +// NewMockDownsamplerAndWriter creates a new mock instance +func NewMockDownsamplerAndWriter(ctrl *gomock.Controller) *MockDownsamplerAndWriter { + mock := &MockDownsamplerAndWriter{ctrl: ctrl} + mock.recorder = &MockDownsamplerAndWriterMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockDownsamplerAndWriter) EXPECT() *MockDownsamplerAndWriterMockRecorder { + return m.recorder +} + +// Write mocks base method +func (m *MockDownsamplerAndWriter) Write(arg0 context.Context, arg1 models.Tags, arg2 ts.Datapoints, arg3 time.Unit) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Write", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// Write indicates an expected call of Write +func (mr *MockDownsamplerAndWriterMockRecorder) Write(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Write", reflect.TypeOf((*MockDownsamplerAndWriter)(nil).Write), arg0, arg1, arg2, arg3) +} + +// WriteBatch mocks base method +func (m *MockDownsamplerAndWriter) WriteBatch(arg0 context.Context, arg1 DownsampleAndWriteIter) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WriteBatch", arg0, arg1) + ret0, _ := 
ret[0].(error) + return ret0 +} + +// WriteBatch indicates an expected call of WriteBatch +func (mr *MockDownsamplerAndWriterMockRecorder) WriteBatch(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteBatch", reflect.TypeOf((*MockDownsamplerAndWriter)(nil).WriteBatch), arg0, arg1) +} diff --git a/src/cmd/services/m3coordinator/ingest/write_test.go b/src/cmd/services/m3coordinator/ingest/write_test.go index f76893c368..690d7ba5fb 100644 --- a/src/cmd/services/m3coordinator/ingest/write_test.go +++ b/src/cmd/services/m3coordinator/ingest/write_test.go @@ -18,7 +18,7 @@ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. -package write +package ingest import ( "context" diff --git a/src/query/api/v1/handler/prometheus/remote/write.go b/src/query/api/v1/handler/prometheus/remote/write.go index e56cf0df3d..441c93e102 100644 --- a/src/query/api/v1/handler/prometheus/remote/write.go +++ b/src/query/api/v1/handler/prometheus/remote/write.go @@ -24,17 +24,17 @@ import ( "context" "errors" "net/http" - "sync" - "github.com/m3db/m3/src/cmd/services/m3coordinator/downsample" + "github.com/m3db/m3/src/cmd/services/m3coordinator/ingest" "github.com/m3db/m3/src/query/api/v1/handler" "github.com/m3db/m3/src/query/api/v1/handler/prometheus" "github.com/m3db/m3/src/query/generated/proto/prompb" "github.com/m3db/m3/src/query/models" "github.com/m3db/m3/src/query/storage" + "github.com/m3db/m3/src/query/ts" "github.com/m3db/m3/src/query/util/logging" "github.com/m3db/m3/src/x/net/http" - xerrors "github.com/m3db/m3x/errors" + xtime "github.com/m3db/m3x/time" "github.com/golang/protobuf/proto" "github.com/uber-go/tally" @@ -55,28 +55,25 @@ var ( // PromWriteHandler represents a handler for prometheus write endpoint. type PromWriteHandler struct { - store storage.Storage - downsampler downsample.Downsampler - promWriteMetrics promWriteMetrics - tagOptions models.TagOptions + downsamplerAndWriter ingest.DownsamplerAndWriter + promWriteMetrics promWriteMetrics + tagOptions models.TagOptions } // NewPromWriteHandler returns a new instance of handler. 
func NewPromWriteHandler( - store storage.Storage, - downsampler downsample.Downsampler, + downsamplerAndWriter ingest.DownsamplerAndWriter, tagOptions models.TagOptions, scope tally.Scope, ) (http.Handler, error) { - if store == nil && downsampler == nil { + if downsamplerAndWriter == nil { return nil, errNoStorageOrDownsampler } return &PromWriteHandler{ - store: store, - downsampler: downsampler, - promWriteMetrics: newPromWriteMetrics(scope), - tagOptions: tagOptions, + downsamplerAndWriter: downsamplerAndWriter, + promWriteMetrics: newPromWriteMetrics(scope), + tagOptions: tagOptions, }, nil } @@ -101,7 +98,9 @@ func (h *PromWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { xhttp.Error(w, rErr.Inner(), rErr.Code()) return } - if err := h.write(r.Context(), req); err != nil { + + err := h.write(r.Context(), req) + if err != nil { h.promWriteMetrics.writeErrorsServer.Inc(1) logging.WithContext(r.Context()).Error("Write error", zap.Any("err", err)) xhttp.Error(w, err, http.StatusInternalServerError) @@ -126,104 +125,43 @@ func (h *PromWriteHandler) parseRequest(r *http.Request) (*prompb.WriteRequest, } func (h *PromWriteHandler) write(ctx context.Context, r *prompb.WriteRequest) error { - var ( - wg sync.WaitGroup - writeUnaggErr error - writeAggErr error - ) - if h.downsampler != nil { - // If writing downsampled aggregations, write them async - wg.Add(1) - go func() { - writeAggErr = h.writeAggregated(ctx, r) - wg.Done() - }() - } - - if h.store != nil { - // Write the unaggregated points out, don't spawn goroutine - // so we reduce number of goroutines just a fraction - writeUnaggErr = h.writeUnaggregated(ctx, r) - } - - if h.downsampler != nil { - // Wait for downsampling to finish if we wrote datapoints - // for aggregations - wg.Wait() - } + iter := newPromTSIter(r.Timeseries) + return h.downsamplerAndWriter.WriteBatch(ctx, iter) +} - var multiErr xerrors.MultiError - multiErr = multiErr.Add(writeUnaggErr) - multiErr = multiErr.Add(writeAggErr) - return multiErr.FinalError() +type promTSIter struct { + idx int + timeseries []*prompb.TimeSeries } -func (h *PromWriteHandler) writeUnaggregated( - ctx context.Context, - r *prompb.WriteRequest, -) error { - var ( - wg sync.WaitGroup - errLock sync.Mutex - multiErr xerrors.MultiError - ) - for _, t := range r.Timeseries { - t := t // Capture for goroutine - - // TODO(r): Consider adding a worker pool to limit write - // request concurrency, instead of using the batch size - // of incoming request to determine concurrency (some level of control). 
- wg.Add(1) - go func() { - write := storage.PromWriteTSToM3(t, h.tagOptions) - write.Attributes = storage.Attributes{ - MetricsType: storage.UnaggregatedMetricsType, - } - - if err := h.store.Write(ctx, write); err != nil { - errLock.Lock() - multiErr = multiErr.Add(err) - errLock.Unlock() - } - - wg.Done() - }() - } +func newPromTSIter(timeseries []*prompb.TimeSeries) *promTSIter { + return &promTSIter{idx: -1, timeseries: timeseries} +} - wg.Wait() - return multiErr.LastError() +func (i *promTSIter) Next() bool { + i.idx++ + return i.idx < len(i.timeseries) } -func (h *PromWriteHandler) writeAggregated( - _ context.Context, - r *prompb.WriteRequest, -) error { - metricsAppender, err := h.downsampler.NewMetricsAppender() - if err != nil { - return err +func (i *promTSIter) Current() (models.Tags, ts.Datapoints, xtime.Unit) { + if len(i.timeseries) == 0 || i.idx < 0 || i.idx >= len(i.timeseries) { + return models.Tags{}, nil, 0 } - multiErr := xerrors.NewMultiError() - for _, ts := range r.Timeseries { - metricsAppender.Reset() - for _, label := range ts.Labels { - metricsAppender.AddTag(label.Name, label.Value) - } - - samplesAppender, err := metricsAppender.SamplesAppender() - if err != nil { - multiErr = multiErr.Add(err) - continue - } - - for _, elem := range ts.Samples { - err := samplesAppender.AppendGaugeSample(elem.Value) - if err != nil { - multiErr = multiErr.Add(err) - } - } + curr := i.timeseries[i.idx] + // TODO: Add a storage function for this? + tags := make([]models.Tag, 0, len(curr.Labels)) + for _, label := range curr.Labels { + tags = append(tags, models.Tag{Name: label.Name, Value: label.Value}) } + return models.Tags{Tags: tags}, storage.PromSamplesToM3Datapoints(curr.Samples), xtime.Millisecond +} + +func (i *promTSIter) Reset() error { + i.idx = -1 + return nil +} - metricsAppender.Finalize() - return multiErr.LastError() +func (i *promTSIter) Error() error { + return nil } diff --git a/src/query/api/v1/handler/prometheus/remote/write_test.go b/src/query/api/v1/handler/prometheus/remote/write_test.go index 05329c4e5a..586fc9cc73 100644 --- a/src/query/api/v1/handler/prometheus/remote/write_test.go +++ b/src/query/api/v1/handler/prometheus/remote/write_test.go @@ -27,23 +27,21 @@ import ( "testing" "time" + "github.com/golang/mock/gomock" + "github.com/m3db/m3/src/cmd/services/m3coordinator/ingest" "github.com/m3db/m3/src/dbnode/x/metrics" "github.com/m3db/m3/src/query/api/v1/handler/prometheus/remote/test" - "github.com/m3db/m3/src/query/test/m3" "github.com/m3db/m3/src/query/util/logging" xclock "github.com/m3db/m3x/clock" - "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" "github.com/uber-go/tally" ) func TestPromWriteParsing(t *testing.T) { logging.InitWithCores(nil) - ctrl := gomock.NewController(t) - storage, _ := m3.NewStorageAndSession(t, ctrl) - promWrite := &PromWriteHandler{store: storage} + promWrite := &PromWriteHandler{} promReq := test.GeneratePromWriteRequest() promReqBody := test.GeneratePromWriteRequestBody(t, promReq) @@ -58,10 +56,10 @@ func TestPromWrite(t *testing.T) { logging.InitWithCores(nil) ctrl := gomock.NewController(t) - storage, session := m3.NewStorageAndSession(t, ctrl) - session.EXPECT().WriteTagged(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + mockDownsamplerAndWriter := ingest.NewMockDownsamplerAndWriter(ctrl) + 
mockDownsamplerAndWriter.EXPECT().WriteBatch(gomock.Any(), gomock.Any()) - promWrite := &PromWriteHandler{store: storage} + promWrite := &PromWriteHandler{downsamplerAndWriter: mockDownsamplerAndWriter} promReq := test.GeneratePromWriteRequest() promReqBody := test.GeneratePromWriteRequestBody(t, promReq) @@ -78,15 +76,18 @@ func TestWriteErrorMetricCount(t *testing.T) { logging.InitWithCores(nil) ctrl := gomock.NewController(t) - storage, session := m3.NewStorageAndSession(t, ctrl) - session.EXPECT().WriteTagged(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + mockDownsamplerAndWriter := ingest.NewMockDownsamplerAndWriter(ctrl) + mockDownsamplerAndWriter.EXPECT().WriteBatch(gomock.Any(), gomock.Any()) reporter := xmetrics.NewTestStatsReporter(xmetrics.NewTestStatsReporterOptions()) scope, closer := tally.NewRootScope(tally.ScopeOptions{Reporter: reporter}, time.Millisecond) defer closer.Close() writeMetrics := newPromWriteMetrics(scope) - promWrite := &PromWriteHandler{store: storage, promWriteMetrics: writeMetrics} + promWrite := &PromWriteHandler{ + downsamplerAndWriter: mockDownsamplerAndWriter, + promWriteMetrics: writeMetrics, + } req, _ := http.NewRequest("POST", PromWriteURL, nil) promWrite.ServeHTTP(httptest.NewRecorder(), req) diff --git a/src/query/generated/mocks/generate.go b/src/query/generated/mocks/generate.go index 85bc8843d3..d5f88dfec6 100644 --- a/src/query/generated/mocks/generate.go +++ b/src/query/generated/mocks/generate.go @@ -21,6 +21,7 @@ // mockgen rules for generating mocks for exported interfaces (reflection mode). //go:generate sh -c "mockgen -package=downsample $PACKAGE/src/cmd/services/m3coordinator/downsample Downsampler,MetricsAppender,SamplesAppender | genclean -pkg $PACKAGE/src/cmd/services/m3coordinator/downsample -out $GOPATH/src/$PACKAGE/src/cmd/services/m3coordinator/downsample/downsample_mock.go" //go:generate sh -c "mockgen -package=block -destination=$GOPATH/src/$PACKAGE/src/query/block/block_mock.go $PACKAGE/src/query/block Block,StepIter,SeriesIter,Builder,Step" +//go:generate sh -c "mockgen -package=ingest -destination=$GOPATH/src/$PACKAGE/src/cmd/services/m3coordinator/ingest/write_mock.go $PACKAGE/src/cmd/services/m3coordinator/ingest DownsamplerAndWriter" // mockgen rules for generating mocks for unexported interfaces (file mode). 
//go:generate sh -c "mockgen -package=m3ql -destination=$GOPATH/src/github.com/m3db/m3/src/query/parser/m3ql/types_mock.go -source=$GOPATH/src/github.com/m3db/m3/src/query/parser/m3ql/types.go" From 176d8628455c17cdd17885ef394da34936b8e959 Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Tue, 22 Jan 2019 15:08:37 -0500 Subject: [PATCH 11/70] handle nil downsampler --- .../services/m3coordinator/ingest/write.go | 82 ++++++++++--------- 1 file changed, 43 insertions(+), 39 deletions(-) diff --git a/src/cmd/services/m3coordinator/ingest/write.go b/src/cmd/services/m3coordinator/ingest/write.go index 71db890bc0..f018fb341a 100644 --- a/src/cmd/services/m3coordinator/ingest/write.go +++ b/src/cmd/services/m3coordinator/ingest/write.go @@ -81,28 +81,30 @@ func (d *downsamplerAndWriter) Write( datapoints ts.Datapoints, unit xtime.Unit, ) error { - appender, err := d.downsampler.NewMetricsAppender() - if err != nil { - return err - } - - for _, tag := range tags.Tags { - appender.AddTag(tag.Name, tag.Value) - } + if d.downsampler != nil { + appender, err := d.downsampler.NewMetricsAppender() + if err != nil { + return err + } - samplesAppender, err := appender.SamplesAppender() - if err != nil { - return err - } + for _, tag := range tags.Tags { + appender.AddTag(tag.Name, tag.Value) + } - for _, dp := range datapoints { - err := samplesAppender.AppendGaugeSample(dp.Value) + samplesAppender, err := appender.SamplesAppender() if err != nil { return err } - } - appender.Finalize() + for _, dp := range datapoints { + err := samplesAppender.AppendGaugeSample(dp.Value) + if err != nil { + return err + } + } + + appender.Finalize() + } return d.store.Write(ctx, &storage.WriteQuery{ Tags: tags, @@ -118,38 +120,40 @@ func (d *downsamplerAndWriter) WriteBatch( ctx context.Context, iter DownsampleAndWriteIter, ) error { - // Write aggregated. - appender, err := d.downsampler.NewMetricsAppender() - if err != nil { - return err - } - - for iter.Next() { - appender.Reset() - tags, datapoints, _ := iter.Current() - for _, tag := range tags.Tags { - appender.AddTag(tag.Name, tag.Value) - } - - samplesAppender, err := appender.SamplesAppender() + if d.downsampler != nil { + // Write aggregated. + appender, err := d.downsampler.NewMetricsAppender() if err != nil { return err } - for _, dp := range datapoints { - err := samplesAppender.AppendGaugeSample(dp.Value) + for iter.Next() { + appender.Reset() + tags, datapoints, _ := iter.Current() + for _, tag := range tags.Tags { + appender.AddTag(tag.Name, tag.Value) + } + + samplesAppender, err := appender.SamplesAppender() if err != nil { return err } + + for _, dp := range datapoints { + err := samplesAppender.AppendGaugeSample(dp.Value) + if err != nil { + return err + } + } } - } - appender.Finalize() + appender.Finalize() - if err := iter.Error(); err != nil { - return err - } - if err := iter.Reset(); err != nil { - return err + if err := iter.Error(); err != nil { + return err + } + if err := iter.Reset(); err != nil { + return err + } } // Write unaggregated. 
From ffd52baf489db2b9adb1f2ceed2bf87d6a83c914 Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Tue, 22 Jan 2019 15:09:55 -0500 Subject: [PATCH 12/70] add test for case where downsampler is nil --- .../m3coordinator/ingest/write_test.go | 33 +++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/src/cmd/services/m3coordinator/ingest/write_test.go b/src/cmd/services/m3coordinator/ingest/write_test.go index 690d7ba5fb..42a550f6cb 100644 --- a/src/cmd/services/m3coordinator/ingest/write_test.go +++ b/src/cmd/services/m3coordinator/ingest/write_test.go @@ -174,6 +174,20 @@ func TestDownsampleAndWrite(t *testing.T) { require.NoError(t, err) } +func TestDownsampleAndWriteNoDownsampler(t *testing.T) { + ctrl := gomock.NewController(t) + downAndWrite, _, session := newTestDownsamplerAndWriter(t, ctrl) + downAndWrite.downsampler = nil + + for _, dp := range testDatapoints1 { + session.EXPECT().WriteTagged( + gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), dp.Value, gomock.Any(), gomock.Any()) + } + + err := downAndWrite.Write(context.Background(), testTags1, testDatapoints1, xtime.Second) + require.NoError(t, err) +} + func TestDownsampleAndWriteBatch(t *testing.T) { ctrl := gomock.NewController(t) downAndWrite, downsampler, session := newTestDownsamplerAndWriter(t, ctrl) @@ -218,6 +232,25 @@ func TestDownsampleAndWriteBatch(t *testing.T) { require.NoError(t, err) } +func TestDownsampleAndWriteBatchNoDownsampler(t *testing.T) { + ctrl := gomock.NewController(t) + downAndWrite, _, session := newTestDownsamplerAndWriter(t, ctrl) + downAndWrite.downsampler = nil + + for _, dp := range testDatapoints1 { + session.EXPECT().WriteTagged( + gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), dp.Value, gomock.Any(), gomock.Any()) + } + for _, dp := range testDatapoints2 { + session.EXPECT().WriteTagged( + gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), dp.Value, gomock.Any(), gomock.Any()) + } + + iter := newTestIter(testEntries) + err := downAndWrite.WriteBatch(context.Background(), iter) + require.NoError(t, err) +} + func newTestDownsamplerAndWriter( t *testing.T, ctrl *gomock.Controller, From 8dba5d40fb0dd534030a9493a4ff35e02503cdc7 Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Tue, 22 Jan 2019 15:25:23 -0500 Subject: [PATCH 13/70] use downsamplerAndWriter in carbon ingester --- .../m3coordinator/ingest/carbon/ingest.go | 27 +++++++++---------- 1 file changed, 13 insertions(+), 14 deletions(-) diff --git a/src/cmd/services/m3coordinator/ingest/carbon/ingest.go b/src/cmd/services/m3coordinator/ingest/carbon/ingest.go index 7948d65f1a..788f86bcef 100644 --- a/src/cmd/services/m3coordinator/ingest/carbon/ingest.go +++ b/src/cmd/services/m3coordinator/ingest/carbon/ingest.go @@ -22,11 +22,13 @@ package ingestcarbon import ( "bytes" + "context" "errors" "fmt" "net" "sync" + "github.com/m3db/m3/src/cmd/services/m3coordinator/ingest" "github.com/m3db/m3/src/metrics/carbon" "github.com/m3db/m3/src/query/models" "github.com/m3db/m3/src/query/ts" @@ -51,12 +53,6 @@ var ( errCannotGenerateTagsFromEmptyName = errors.New("cannot generate tags from empty name") ) -// StorageWriter is the interface that must be provided to the ingester so that it can -// write the ingested metrics. -type StorageWriter interface { - Write(tags models.Tags, dp ts.Datapoint, unit xtime.Unit) error -} - // Options configures the ingester. 
type Options struct { InstrumentOptions instrument.Options @@ -65,19 +61,19 @@ type Options struct { // NewIngester returns an ingester for carbon metrics. func NewIngester( - storage StorageWriter, + downsamplerAndWriter ingest.DownsamplerAndWriter, opts Options, ) m3xserver.Handler { return &ingester{ - storage: storage, - opts: opts, + downsamplerAndWriter: downsamplerAndWriter, + opts: opts, } } type ingester struct { - storage StorageWriter - opts Options - conn net.Conn + downsamplerAndWriter ingest.DownsamplerAndWriter + opts Options + conn net.Conn } func (i *ingester) Handle(conn net.Conn) { @@ -96,8 +92,10 @@ func (i *ingester) Handle(conn net.Conn) { wg.Add(1) i.opts.WorkerPool.Go(func() { - dp := ts.Datapoint{Timestamp: timestamp, Value: value} - i.storage.Write(models.Tags{}, dp, xtime.Second) + // TODO: Real context? + datapoints := []ts.Datapoint{{Timestamp: timestamp, Value: value}} + i.downsamplerAndWriter.Write( + context.Background(), models.Tags{}, datapoints, xtime.Second) wg.Done() }) // i.metrics.malformedCounter.Inc(int64(s.MalformedCount)) @@ -110,6 +108,7 @@ func (i *ingester) Handle(conn net.Conn) { func (i *ingester) Close() { // TODO: Log error + // TODO: Not sure this will do anything? i.conn.Close() } From 1d25d1b213ef0a0fe10ff07b490162ec7c1451c5 Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Tue, 22 Jan 2019 18:06:38 -0500 Subject: [PATCH 14/70] wip --- src/cmd/services/m3coordinator/ingest/carbon/ingest_test.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/cmd/services/m3coordinator/ingest/carbon/ingest_test.go b/src/cmd/services/m3coordinator/ingest/carbon/ingest_test.go index e33ec07372..37385d8206 100644 --- a/src/cmd/services/m3coordinator/ingest/carbon/ingest_test.go +++ b/src/cmd/services/m3coordinator/ingest/carbon/ingest_test.go @@ -28,6 +28,10 @@ import ( "github.com/stretchr/testify/require" ) +func TestIngesterHandleConn(t *testing.T) { + +} + func TestGenerateTagsFromName(t *testing.T) { testCases := []struct { name string From 3f6e5eb160619daf1c0f47c2cb1ac31d9090f429 Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Wed, 23 Jan 2019 17:42:57 -0500 Subject: [PATCH 15/70] wip --- .../m3coordinator/ingest/carbon/ingest.go | 33 +++++-- .../ingest/carbon/ingest_test.go | 98 +++++++++++++++++++ src/metrics/carbon/parser.go | 3 +- src/metrics/carbon/parser_test.go | 22 +++-- 4 files changed, 138 insertions(+), 18 deletions(-) diff --git a/src/cmd/services/m3coordinator/ingest/carbon/ingest.go b/src/cmd/services/m3coordinator/ingest/carbon/ingest.go index 788f86bcef..e026b9262c 100644 --- a/src/cmd/services/m3coordinator/ingest/carbon/ingest.go +++ b/src/cmd/services/m3coordinator/ingest/carbon/ingest.go @@ -87,21 +87,40 @@ func (i *ingester) Handle(conn net.Conn) { s = carbon.NewScanner(conn) ) + fmt.Println("start scan") for s.Scan() { _, timestamp, value := s.Metric() + fmt.Println("da fuq") wg.Add(1) - i.opts.WorkerPool.Go(func() { - // TODO: Real context? - datapoints := []ts.Datapoint{{Timestamp: timestamp, Value: value}} - i.downsamplerAndWriter.Write( - context.Background(), models.Tags{}, datapoints, xtime.Second) - wg.Done() - }) + if i.opts.WorkerPool != nil { + i.opts.WorkerPool.Go(func() { + // TODO: Real context? + datapoints := []ts.Datapoint{{Timestamp: timestamp, Value: value}} + i.downsamplerAndWriter.Write( + context.Background(), models.Tags{}, datapoints, xtime.Second) + wg.Done() + }) + } else { + go func() { + // TODO: Real context? 
+ datapoints := []ts.Datapoint{{Timestamp: timestamp, Value: value}} + fmt.Println("hmm:", datapoints) + i.downsamplerAndWriter.Write( + context.Background(), models.Tags{}, datapoints, xtime.Second) + fmt.Println("hmm done:", datapoints) + wg.Done() + }() + } + // i.metrics.malformedCounter.Inc(int64(s.MalformedCount)) s.MalformedCount = 0 } + fmt.Println("end scan") + if err := s.Err(); err != nil { + fmt.Println(err) + } // Wait for all outstanding writes wg.Wait() } diff --git a/src/cmd/services/m3coordinator/ingest/carbon/ingest_test.go b/src/cmd/services/m3coordinator/ingest/carbon/ingest_test.go index 37385d8206..cc07b162a1 100644 --- a/src/cmd/services/m3coordinator/ingest/carbon/ingest_test.go +++ b/src/cmd/services/m3coordinator/ingest/carbon/ingest_test.go @@ -21,15 +21,47 @@ package ingestcarbon import ( + "bytes" "fmt" + "io" + "net" "testing" + "time" + "github.com/golang/mock/gomock" + + "github.com/m3db/m3/src/cmd/services/m3coordinator/ingest" "github.com/m3db/m3/src/query/models" + "github.com/m3db/m3/src/query/ts" + xtime "github.com/m3db/m3x/time" "github.com/stretchr/testify/require" ) +const ( + numLinesInTestPacket = 10 +) + +// Created by init(). +var ( + testMetrics = []testMetric{} + testPacket = []byte{} +) + func TestIngesterHandleConn(t *testing.T) { + ctrl := gomock.NewController(t) + mockDownsamplerAndWriter := ingest.NewMockDownsamplerAndWriter(ctrl) + for _, metric := range testMetrics { + dp := []ts.Datapoint{{Timestamp: time.Unix(int64(metric.timestamp), 0), Value: metric.value}} + fmt.Println("expecting: ", dp) + mockDownsamplerAndWriter.EXPECT(). + Write(gomock.Any(), gomock.Any(), dp, xtime.Second).AnyTimes() + } + + byteConn := &byteConn{b: bytes.NewBuffer(testPacket)} + ingester := NewIngester(mockDownsamplerAndWriter, Options{}) + ingester.Handle(byteConn) + panic("hmm") } func TestGenerateTagsFromName(t *testing.T) { @@ -80,3 +112,69 @@ func TestGenerateTagsFromName(t *testing.T) { require.Equal(t, tc.expectedTags, tags.Tags) } } + +// byteConn implements the net.Conn interface so that we can test the handler without +// going over the network. +type byteConn struct { + b io.Reader + closed bool +} + +func (b *byteConn) Read(buf []byte) (n int, err error) { + if !b.closed { + return b.b.Read(buf) + } + + panic("eof") + return 0, io.EOF +} + +func (b *byteConn) Write(buf []byte) (n int, err error) { + panic("not_imlpemented") +} + +func (b *byteConn) Close() error { + b.closed = true + return nil +} + +func (b *byteConn) LocalAddr() net.Addr { + panic("not_imlpemented") +} + +func (b *byteConn) RemoteAddr() net.Addr { + panic("not_imlpemented") +} + +func (b *byteConn) SetDeadline(t time.Time) error { + panic("not_imlpemented") +} + +func (b *byteConn) SetReadDeadline(t time.Time) error { + panic("not_imlpemented") +} + +func (b *byteConn) SetWriteDeadline(t time.Time) error { + panic("not_imlpemented") +} + +type testMetric struct { + metric []byte + timestamp int + value float64 +} + +func init() { + for i := 0; i < numLinesInTestPacket; i++ { + metric := []byte(fmt.Sprintf("test_metric_%d", i)) + + testMetrics = append(testMetrics, testMetric{ + metric: metric, + timestamp: i, + value: float64(i), + }) + + line := []byte(fmt.Sprintf("%s %d %d\n", string(metric), i, i)) + testPacket = append(testPacket, line...) 
+ } +} diff --git a/src/metrics/carbon/parser.go b/src/metrics/carbon/parser.go index 3dbe069b74..a457a9a2b2 100644 --- a/src/metrics/carbon/parser.go +++ b/src/metrics/carbon/parser.go @@ -247,6 +247,7 @@ func (s *Scanner) Scan() bool { var err error if s.path, s.timestamp, s.value, err = Parse(s.scanner.Bytes()); err != nil { + // TODO: Log malformed s.MalformedCount++ continue } @@ -277,7 +278,7 @@ func parseWordOffsets(b []byte) (int, int) { } } - valEnd := -1 + valEnd := valStart reachedEnd := true for i := valStart + 1; i < len(b); i++ { valEnd = i diff --git a/src/metrics/carbon/parser_test.go b/src/metrics/carbon/parser_test.go index 367487751d..914af9da67 100644 --- a/src/metrics/carbon/parser_test.go +++ b/src/metrics/carbon/parser_test.go @@ -44,16 +44,18 @@ type carbonLine struct { } var testLines = []carbonLine{ - {"foo.bar.zed", 45565.02, time.Unix(1428951394, 0), - "foo.bar.zed 45565.02 1428951394"}, - {"foo.bar.quad", 10.0, time.Unix(1428951394, 0), - "foo.bar.quad 10 1428951394"}, - {"foo.bar.nan", math.NaN(), time.Unix(1428951394, 0), - "foo.bar.nan nan 1428951394"}, - {"foo.bar.nan", math.NaN(), time.Unix(1428951394, 0), - "foo.bar.nan -NaN 1428951394"}, - {"foo.bar.negative", -18000.00000, time.Unix(1429480924, 0), - "foo.bar.negative -18000.000000 1429480924"}, + // {"foo.bar.zed", 45565.02, time.Unix(1428951394, 0), + // "foo.bar.zed 45565.02 1428951394"}, + // {"foo.bar.quad", 10.0, time.Unix(1428951394, 0), + // "foo.bar.quad 10 1428951394"}, + // {"foo.bar.nan", math.NaN(), time.Unix(1428951394, 0), + // "foo.bar.nan nan 1428951394"}, + // {"foo.bar.nan", math.NaN(), time.Unix(1428951394, 0), + // "foo.bar.nan -NaN 1428951394"}, + // {"foo.bar.negative", -18000.00000, time.Unix(1429480924, 0), + // "foo.bar.negative -18000.000000 1429480924"}, + {"short", 1, time.Unix(1, 0), + "short 1 1"}, } func TestScannerMetric(t *testing.T) { From caf97a058b962ee9f8db10c813b323baecd547d0 Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Thu, 24 Jan 2019 10:11:12 -0500 Subject: [PATCH 16/70] update test --- .../m3coordinator/ingest/carbon/ingest.go | 17 +++-- .../ingest/carbon/ingest_test.go | 65 ++++++++++++++----- src/metrics/carbon/parser.go | 3 +- 3 files changed, 62 insertions(+), 23 deletions(-) diff --git a/src/cmd/services/m3coordinator/ingest/carbon/ingest.go b/src/cmd/services/m3coordinator/ingest/carbon/ingest.go index e026b9262c..612ff44e7a 100644 --- a/src/cmd/services/m3coordinator/ingest/carbon/ingest.go +++ b/src/cmd/services/m3coordinator/ingest/carbon/ingest.go @@ -87,11 +87,10 @@ func (i *ingester) Handle(conn net.Conn) { s = carbon.NewScanner(conn) ) - fmt.Println("start scan") for s.Scan() { - _, timestamp, value := s.Metric() + name, timestamp, value := s.Metric() + name = append([]byte(nil), name...) // Copy name. - fmt.Println("da fuq") wg.Add(1) if i.opts.WorkerPool != nil { i.opts.WorkerPool.Go(func() { @@ -105,10 +104,14 @@ func (i *ingester) Handle(conn net.Conn) { go func() { // TODO: Real context? 
datapoints := []ts.Datapoint{{Timestamp: timestamp, Value: value}} - fmt.Println("hmm:", datapoints) + tags, err := generateTagsFromName(name) + if err != nil { + // TODO: Log error + fmt.Println(err) + return + } i.downsamplerAndWriter.Write( - context.Background(), models.Tags{}, datapoints, xtime.Second) - fmt.Println("hmm done:", datapoints) + context.Background(), tags, datapoints, xtime.Second) wg.Done() }() } @@ -116,11 +119,11 @@ func (i *ingester) Handle(conn net.Conn) { // i.metrics.malformedCounter.Inc(int64(s.MalformedCount)) s.MalformedCount = 0 } - fmt.Println("end scan") if err := s.Err(); err != nil { fmt.Println(err) } + // Wait for all outstanding writes wg.Wait() } diff --git a/src/cmd/services/m3coordinator/ingest/carbon/ingest_test.go b/src/cmd/services/m3coordinator/ingest/carbon/ingest_test.go index cc07b162a1..4bff215427 100644 --- a/src/cmd/services/m3coordinator/ingest/carbon/ingest_test.go +++ b/src/cmd/services/m3coordinator/ingest/carbon/ingest_test.go @@ -22,9 +22,12 @@ package ingestcarbon import ( "bytes" + "context" "fmt" "io" "net" + "sort" + "sync" "testing" "time" @@ -38,7 +41,9 @@ import ( ) const ( - numLinesInTestPacket = 10 + // Keep this value large enough to catch issues like the ingester + // not copying the name. + numLinesInTestPacket = 10000 ) // Created by init(). @@ -51,17 +56,28 @@ func TestIngesterHandleConn(t *testing.T) { ctrl := gomock.NewController(t) mockDownsamplerAndWriter := ingest.NewMockDownsamplerAndWriter(ctrl) - for _, metric := range testMetrics { - dp := []ts.Datapoint{{Timestamp: time.Unix(int64(metric.timestamp), 0), Value: metric.value}} - fmt.Println("expecting: ", dp) - mockDownsamplerAndWriter.EXPECT(). - Write(gomock.Any(), gomock.Any(), dp, xtime.Second).AnyTimes() - } + var ( + foundLock = sync.Mutex{} + found = []testMetric{} + ) + mockDownsamplerAndWriter.EXPECT(). 
+ Write(gomock.Any(), gomock.Any(), gomock.Any(), xtime.Second).DoAndReturn(func( + _ context.Context, + tags models.Tags, + dp ts.Datapoints, + unit xtime.Unit, + ) { + foundLock.Lock() + found = append(found, testMetric{ + tags: tags, timestamp: int(dp[0].Timestamp.Unix()), value: dp[0].Value}) + foundLock.Unlock() + }).Return(nil).AnyTimes() byteConn := &byteConn{b: bytes.NewBuffer(testPacket)} ingester := NewIngester(mockDownsamplerAndWriter, Options{}) ingester.Handle(byteConn) - panic("hmm") + + assertTestMetricsAreEqual(t, testMetrics, found) } func TestGenerateTagsFromName(t *testing.T) { @@ -125,12 +141,11 @@ func (b *byteConn) Read(buf []byte) (n int, err error) { return b.b.Read(buf) } - panic("eof") return 0, io.EOF } func (b *byteConn) Write(buf []byte) (n int, err error) { - panic("not_imlpemented") + panic("not_implemented") } func (b *byteConn) Close() error { @@ -139,37 +154,57 @@ func (b *byteConn) Close() error { } func (b *byteConn) LocalAddr() net.Addr { - panic("not_imlpemented") + panic("not_implemented") } func (b *byteConn) RemoteAddr() net.Addr { - panic("not_imlpemented") + panic("not_implemented") } func (b *byteConn) SetDeadline(t time.Time) error { - panic("not_imlpemented") + panic("not_implemented") } func (b *byteConn) SetReadDeadline(t time.Time) error { - panic("not_imlpemented") + panic("not_implemented") } func (b *byteConn) SetWriteDeadline(t time.Time) error { - panic("not_imlpemented") + panic("not_implemented") } type testMetric struct { metric []byte + tags models.Tags timestamp int value float64 } +func assertTestMetricsAreEqual(t *testing.T, a, b []testMetric) { + require.Equal(t, len(a), len(b)) + + sort.Slice(b, func(i, j int) bool { + return b[i].timestamp < b[j].timestamp + }) + + for i, f := range b { + require.Equal(t, a[i].tags, f.tags) + require.Equal(t, a[i].timestamp, f.timestamp) + require.Equal(t, a[i].value, f.value) + } +} + func init() { for i := 0; i < numLinesInTestPacket; i++ { metric := []byte(fmt.Sprintf("test_metric_%d", i)) + tags, err := generateTagsFromName(metric) + if err != nil { + panic(err) + } testMetrics = append(testMetrics, testMetric{ metric: metric, + tags: tags, timestamp: i, value: float64(i), }) diff --git a/src/metrics/carbon/parser.go b/src/metrics/carbon/parser.go index a457a9a2b2..f7381aeed7 100644 --- a/src/metrics/carbon/parser.go +++ b/src/metrics/carbon/parser.go @@ -247,7 +247,8 @@ func (s *Scanner) Scan() bool { var err error if s.path, s.timestamp, s.value, err = Parse(s.scanner.Bytes()); err != nil { - // TODO: Log malformed + // TODO: Convert to log + fmt.Printf("malformed: %s, err: %s\n", s.path, err.Error()) s.MalformedCount++ continue } From 761c2a1ad6e7b6548ea82048ba8a6d13faff16cf Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Thu, 24 Jan 2019 11:00:25 -0500 Subject: [PATCH 17/70] wire up carbon ingestion server --- .../m3coordinator/ingest/carbon/ingest.go | 4 +- .../services/m3coordinator/ingest/write.go | 6 ++ .../m3coordinator/ingest/write_mock.go | 15 +++++ src/cmd/services/m3query/config/config.go | 10 ++++ src/query/api/v1/httpd/handler.go | 56 +++++++++--------- src/query/server/server.go | 59 ++++++++++++++++++- 6 files changed, 116 insertions(+), 34 deletions(-) diff --git a/src/cmd/services/m3coordinator/ingest/carbon/ingest.go b/src/cmd/services/m3coordinator/ingest/carbon/ingest.go index 612ff44e7a..cd3a7804a1 100644 --- a/src/cmd/services/m3coordinator/ingest/carbon/ingest.go +++ b/src/cmd/services/m3coordinator/ingest/carbon/ingest.go @@ -63,11 +63,11 @@ type Options 
struct { func NewIngester( downsamplerAndWriter ingest.DownsamplerAndWriter, opts Options, -) m3xserver.Handler { +) (m3xserver.Handler, error) { return &ingester{ downsamplerAndWriter: downsamplerAndWriter, opts: opts, - } + }, nil } type ingester struct { diff --git a/src/cmd/services/m3coordinator/ingest/write.go b/src/cmd/services/m3coordinator/ingest/write.go index f018fb341a..648215bbca 100644 --- a/src/cmd/services/m3coordinator/ingest/write.go +++ b/src/cmd/services/m3coordinator/ingest/write.go @@ -55,6 +55,8 @@ type DownsamplerAndWriter interface { ctx context.Context, iter DownsampleAndWriteIter, ) error + + Storage() storage.Storage } // downsamplerAndWriter encapsulates the logic for writing data to the downsampler, @@ -186,3 +188,7 @@ func (d *downsamplerAndWriter) WriteBatch( wg.Wait() return multiErr.LastError() } + +func (d *downsamplerAndWriter) Storage() storage.Storage { + return d.store +} diff --git a/src/cmd/services/m3coordinator/ingest/write_mock.go b/src/cmd/services/m3coordinator/ingest/write_mock.go index d1f9577a5b..55ca2a6a58 100644 --- a/src/cmd/services/m3coordinator/ingest/write_mock.go +++ b/src/cmd/services/m3coordinator/ingest/write_mock.go @@ -29,6 +29,7 @@ import ( "reflect" "github.com/m3db/m3/src/query/models" + "github.com/m3db/m3/src/query/storage" "github.com/m3db/m3/src/query/ts" "github.com/m3db/m3x/time" @@ -58,6 +59,20 @@ func (m *MockDownsamplerAndWriter) EXPECT() *MockDownsamplerAndWriterMockRecorde return m.recorder } +// Storage mocks base method +func (m *MockDownsamplerAndWriter) Storage() storage.Storage { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Storage") + ret0, _ := ret[0].(storage.Storage) + return ret0 +} + +// Storage indicates an expected call of Storage +func (mr *MockDownsamplerAndWriterMockRecorder) Storage() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Storage", reflect.TypeOf((*MockDownsamplerAndWriter)(nil).Storage)) +} + // Write mocks base method func (m *MockDownsamplerAndWriter) Write(arg0 context.Context, arg1 models.Tags, arg2 ts.Datapoints, arg3 time.Unit) error { m.ctrl.T.Helper() diff --git a/src/cmd/services/m3query/config/config.go b/src/cmd/services/m3query/config/config.go index 4aeac23a64..295b8d482b 100644 --- a/src/cmd/services/m3query/config/config.go +++ b/src/cmd/services/m3query/config/config.go @@ -94,6 +94,9 @@ type Configuration struct { // Ingest is the ingest server. Ingest *IngestConfiguration `yaml:"ingest"` + // Carbon is the carbon configuration. + Carbon *CarbonConfiguration `yaml:"carbon"` + // Limits specifies limits on per-query resource usage. Limits LimitsConfiguration `yaml:"limits"` } @@ -133,6 +136,13 @@ type IngestConfiguration struct { M3Msg m3msg.Configuration `yaml:"m3msg"` } +// CarbonConfiguration is the configuration for the carbon server. +type CarbonConfiguration struct { + Enabled bool `yaml:"enabled"` + MaxConcurrency int `yaml:"max_concurrency"` + ListenAddress string `yaml:"listen_address"` +} + // LocalConfiguration is the local embedded configuration if running // coordinator embedded in the DB. 
type LocalConfiguration struct { diff --git a/src/query/api/v1/httpd/handler.go b/src/query/api/v1/httpd/handler.go index 7be06d0003..d7121bb9e1 100644 --- a/src/query/api/v1/httpd/handler.go +++ b/src/query/api/v1/httpd/handler.go @@ -27,7 +27,7 @@ import ( "time" clusterclient "github.com/m3db/m3/src/cluster/client" - "github.com/m3db/m3/src/cmd/services/m3coordinator/downsample" + "github.com/m3db/m3/src/cmd/services/m3coordinator/ingest" dbconfig "github.com/m3db/m3/src/cmd/services/m3dbnode/config" "github.com/m3db/m3/src/cmd/services/m3query/config" "github.com/m3db/m3/src/query/api/v1/handler" @@ -63,18 +63,18 @@ var ( // Handler represents an HTTP handler. type Handler struct { - router *mux.Router - handler http.Handler - storage storage.Storage - downsampler downsample.Downsampler - engine *executor.Engine - clusters m3.Clusters - clusterClient clusterclient.Client - config config.Configuration - embeddedDbCfg *dbconfig.DBConfiguration - scope tally.Scope - createdAt time.Time - tagOptions models.TagOptions + router *mux.Router + handler http.Handler + storage storage.Storage + downsamplerAndWriter ingest.DownsamplerAndWriter + engine *executor.Engine + clusters m3.Clusters + clusterClient clusterclient.Client + config config.Configuration + embeddedDbCfg *dbconfig.DBConfiguration + scope tally.Scope + createdAt time.Time + tagOptions models.TagOptions } // Router returns the http handler registered with all relevant routes for query. @@ -84,9 +84,8 @@ func (h *Handler) Router() http.Handler { // NewHandler returns a new instance of handler with routes. func NewHandler( - storage storage.Storage, + downsamplerAndWriter ingest.DownsamplerAndWriter, tagOptions models.TagOptions, - downsampler downsample.Downsampler, engine *executor.Engine, m3dbClusters m3.Clusters, clusterClient clusterclient.Client, @@ -105,18 +104,18 @@ func NewHandler( } h := &Handler{ - router: r, - handler: withMiddleware, - storage: storage, - downsampler: downsampler, - engine: engine, - clusters: m3dbClusters, - clusterClient: clusterClient, - config: cfg, - embeddedDbCfg: embeddedDbCfg, - scope: scope, - createdAt: time.Now(), - tagOptions: tagOptions, + router: r, + handler: withMiddleware, + storage: downsamplerAndWriter.Storage(), + downsamplerAndWriter: downsamplerAndWriter, + engine: engine, + clusters: m3dbClusters, + clusterClient: clusterClient, + config: cfg, + embeddedDbCfg: embeddedDbCfg, + scope: scope, + createdAt: time.Now(), + tagOptions: tagOptions, } return h, nil } @@ -133,8 +132,7 @@ func (h *Handler) RegisterRoutes() error { // Prometheus remote read/write endpoints promRemoteReadHandler := remote.NewPromReadHandler(h.engine, h.scope.Tagged(remoteSource)) promRemoteWriteHandler, err := remote.NewPromWriteHandler( - h.storage, - h.downsampler, + h.downsamplerAndWriter, h.tagOptions, h.scope.Tagged(remoteSource), ) diff --git a/src/query/server/server.go b/src/query/server/server.go index 7958a0f1bd..94e20bc7f5 100644 --- a/src/query/server/server.go +++ b/src/query/server/server.go @@ -33,6 +33,8 @@ import ( clusterclient "github.com/m3db/m3/src/cluster/client" etcdclient "github.com/m3db/m3/src/cluster/client/etcd" "github.com/m3db/m3/src/cmd/services/m3coordinator/downsample" + "github.com/m3db/m3/src/cmd/services/m3coordinator/ingest" + "github.com/m3db/m3/src/cmd/services/m3coordinator/ingest/carbon" dbconfig "github.com/m3db/m3/src/cmd/services/m3dbnode/config" 
"github.com/m3db/m3/src/cmd/services/m3query/config" "github.com/m3db/m3/src/dbnode/client" @@ -57,6 +59,7 @@ import ( "github.com/m3db/m3x/ident" "github.com/m3db/m3x/instrument" "github.com/m3db/m3x/pool" + xserver "github.com/m3db/m3x/server" xsync "github.com/m3db/m3x/sync" xtime "github.com/m3db/m3x/time" @@ -204,7 +207,8 @@ func Run(runOpts RunOptions) { engine := executor.NewEngine(backendStorage, scope.SubScope("engine")) - handler, err := httpd.NewHandler(backendStorage, tagOptions, downsampler, engine, + downsamplerAndWriter := ingest.NewDownsamplerAndWriter(backendStorage, downsampler) + handler, err := httpd.NewHandler(downsamplerAndWriter, tagOptions, engine, m3dbClusters, clusterClient, cfg, runOpts.DBConfig, scope) if err != nil { logger.Fatal("unable to set up handlers", zap.Error(err)) @@ -236,7 +240,7 @@ func Run(runOpts RunOptions) { }() if cfg.Ingest != nil { - logger.Info("starting m3msg server ") + logger.Info("starting m3msg server") ingester, err := cfg.Ingest.Ingester.NewIngester(backendStorage, instrumentOptions) if err != nil { logger.Fatal("unable to create ingester", zap.Error(err)) @@ -244,7 +248,7 @@ func Run(runOpts RunOptions) { server, err := cfg.Ingest.M3Msg.NewServer( ingester.Ingest, - instrumentOptions.SetMetricsScope(scope.SubScope("m3msg")), + instrumentOptions.SetMetricsScope(scope.SubScope("ingest-m3msg")), ) if err != nil { @@ -261,6 +265,55 @@ func Run(runOpts RunOptions) { logger.Info("no m3msg server configured") } + if cfg.Carbon != nil && cfg.Carbon.Enabled { + logger.Info("starting carbon server") + + var ( + carbonIOpts = instrumentOptions.SetMetricsScope( + instrumentOptions.MetricsScope().SubScope("ingest-carbon")) + carbonWorkerPoolOpts xsync.PooledWorkerPoolOptions + carbonWorkerPoolSize int + ) + if cfg.Carbon.MaxConcurrency > 0 { + // Use a bounded worker pool if they requested a specific maximum concurrency. + carbonWorkerPoolOpts = xsync.NewPooledWorkerPoolOptions(). + SetGrowOnDemand(false). + SetInstrumentOptions(carbonIOpts) + carbonWorkerPoolSize = cfg.Carbon.MaxConcurrency + } else { + carbonWorkerPoolOpts = xsync.NewPooledWorkerPoolOptions(). + SetGrowOnDemand(true). + SetKillWorkerProbability(0.001) + // TODO: Constants? 
+ carbonWorkerPoolSize = 1024 + } + workerPool, err := xsync.NewPooledWorkerPool(carbonWorkerPoolSize, carbonWorkerPoolOpts) + if err != nil { + logger.Fatal("unable to create worker pool for carbon ingester: %v", zap.Error(err)) + } + + ingester, err := ingestcarbon.NewIngester(nil, ingestcarbon.Options{ + InstrumentOptions: carbonIOpts, + WorkerPool: workerPool, + }) + if err != nil { + logger.Fatal("unable to create carbon ingester: %v", zap.Error(err)) + } + + // TODO: Default constants + listenAddress := fmt.Sprintf("0.0.0.0:7204") + if cfg.Carbon.ListenAddress != "" { + listenAddress = cfg.Carbon.ListenAddress + } + serverOpts := xserver.NewOptions().SetInstrumentOptions(carbonIOpts) + server := xserver.NewServer(listenAddress, ingester, serverOpts) + err = server.ListenAndServe() + if err != nil { + logger.Fatal("unable to start carbon ingestion server at listen address: %s, err: %v", + zap.String("listenAddress", listenAddress), zap.Error(err)) + } + } + var interruptCh <-chan error = make(chan error) if runOpts.InterruptCh != nil { interruptCh = runOpts.InterruptCh From a6a04ca6b621383ba0edb9d9cecc6a43a6241599 Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Thu, 24 Jan 2019 12:51:36 -0500 Subject: [PATCH 18/70] improve logging and bug fixes --- .../m3coordinator/ingest/carbon/ingest.go | 74 +++++++++---------- src/query/server/server.go | 3 +- src/query/storage/m3/storage.go | 1 + 3 files changed, 38 insertions(+), 40 deletions(-) diff --git a/src/cmd/services/m3coordinator/ingest/carbon/ingest.go b/src/cmd/services/m3coordinator/ingest/carbon/ingest.go index cd3a7804a1..612ed80d79 100644 --- a/src/cmd/services/m3coordinator/ingest/carbon/ingest.go +++ b/src/cmd/services/m3coordinator/ingest/carbon/ingest.go @@ -73,65 +73,61 @@ func NewIngester( type ingester struct { downsamplerAndWriter ingest.DownsamplerAndWriter opts Options - conn net.Conn } func (i *ingester) Handle(conn net.Conn) { - if i.conn != nil { - // TODO: Something - } - i.conn = conn - var ( - wg = sync.WaitGroup{} - s = carbon.NewScanner(conn) + wg = sync.WaitGroup{} + s = carbon.NewScanner(conn) + logger = i.opts.InstrumentOptions.Logger() ) + logger.Debug("handling new carbon ingestion connection") for s.Scan() { name, timestamp, value := s.Metric() - name = append([]byte(nil), name...) // Copy name. + // Copy name since scanner bytes are recycled. + // TODO: Pool. + name = append([]byte(nil), name...) wg.Add(1) - if i.opts.WorkerPool != nil { - i.opts.WorkerPool.Go(func() { - // TODO: Real context? - datapoints := []ts.Datapoint{{Timestamp: timestamp, Value: value}} - i.downsamplerAndWriter.Write( - context.Background(), models.Tags{}, datapoints, xtime.Second) - wg.Done() - }) - } else { - go func() { - // TODO: Real context? - datapoints := []ts.Datapoint{{Timestamp: timestamp, Value: value}} - tags, err := generateTagsFromName(name) - if err != nil { - // TODO: Log error - fmt.Println(err) - return - } - i.downsamplerAndWriter.Write( - context.Background(), tags, datapoints, xtime.Second) - wg.Done() - }() - } + i.opts.WorkerPool.Go(func() { + datapoints := []ts.Datapoint{{Timestamp: timestamp, Value: value}} + // TODO: Pool. + tags, err := generateTagsFromName(name) + if err != nil { + logger.Errorf("err generating tags from carbon name: %s, err: %s", + string(name), err) + return + } + + // TODO: Real context? 
+ err = i.downsamplerAndWriter.Write( + context.Background(), tags, datapoints, xtime.Second) + if err != nil { + logger.Errorf("err writing carbon metric: %s, err: %s", + string(name), err) + } + + wg.Done() + }) // i.metrics.malformedCounter.Inc(int64(s.MalformedCount)) s.MalformedCount = 0 } if err := s.Err(); err != nil { - fmt.Println(err) + logger.Errorf("encountered error during carbon ingestion when scanning connection: %s", err) } - // Wait for all outstanding writes + logger.Debugf("waiting for outstanding carbon ingestion writes to complete") wg.Wait() + logger.Debugf("all outstanding writes completed, shuttin down carbon ingestion handler") + + // Don't close the connection, that is the server's responsibility. } func (i *ingester) Close() { - // TODO: Log error - // TODO: Not sure this will do anything? - i.conn.Close() + // We don't maintain any state in-between connections so there is nothing to do here. } func newCarbonHandlerMetrics(m tally.Scope) carbonHandlerMetrics { @@ -200,11 +196,11 @@ func getOrGenerateKeyName(idx int) []byte { return preFormattedKeyNames[idx] } - return []byte(fmt.Sprintf("__$%d__", idx)) + return []byte(fmt.Sprintf("__graphite%d__", idx)) } func generateKeyName(idx int) []byte { - return []byte(fmt.Sprintf("__$%d__", idx)) + return []byte(fmt.Sprintf("__graphite%d__", idx)) } func init() { diff --git a/src/query/server/server.go b/src/query/server/server.go index 94e20bc7f5..d1ed63f21e 100644 --- a/src/query/server/server.go +++ b/src/query/server/server.go @@ -291,8 +291,9 @@ func Run(runOpts RunOptions) { if err != nil { logger.Fatal("unable to create worker pool for carbon ingester: %v", zap.Error(err)) } + workerPool.Init() - ingester, err := ingestcarbon.NewIngester(nil, ingestcarbon.Options{ + ingester, err := ingestcarbon.NewIngester(downsamplerAndWriter, ingestcarbon.Options{ InstrumentOptions: carbonIOpts, WorkerPool: workerPool, }) diff --git a/src/query/storage/m3/storage.go b/src/query/storage/m3/storage.go index c139bf2b10..02785d7f16 100644 --- a/src/query/storage/m3/storage.go +++ b/src/query/storage/m3/storage.go @@ -390,6 +390,7 @@ func (s *m3storage) Write( // Special case single datapoint because it is common and we // can avoid the overhead of a waitgroup, goroutine, multierr, // iterator duplication etc. 
+ fmt.Println("writing tags: ", query.Tags) return s.writeSingle( ctx, query, query.Datapoints[0], id, tagIterator) } From a2e7898128f9a35d60096e935c3bfc4f9e1bf861 Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Thu, 24 Jan 2019 13:01:21 -0500 Subject: [PATCH 19/70] Fix carbon ingestion tests --- .../ingest/carbon/ingest_test.go | 31 ++++++++++++++----- 1 file changed, 23 insertions(+), 8 deletions(-) diff --git a/src/cmd/services/m3coordinator/ingest/carbon/ingest_test.go b/src/cmd/services/m3coordinator/ingest/carbon/ingest_test.go index 4bff215427..e0feadc22c 100644 --- a/src/cmd/services/m3coordinator/ingest/carbon/ingest_test.go +++ b/src/cmd/services/m3coordinator/ingest/carbon/ingest_test.go @@ -36,6 +36,8 @@ import ( "github.com/m3db/m3/src/cmd/services/m3coordinator/ingest" "github.com/m3db/m3/src/query/models" "github.com/m3db/m3/src/query/ts" + "github.com/m3db/m3x/instrument" + xsync "github.com/m3db/m3x/sync" xtime "github.com/m3db/m3x/time" "github.com/stretchr/testify/require" ) @@ -50,6 +52,11 @@ const ( var ( testMetrics = []testMetric{} testPacket = []byte{} + + testOptions = Options{ + InstrumentOptions: instrument.NewOptions(), + WorkerPool: nil, // Set by init(). + } ) func TestIngesterHandleConn(t *testing.T) { @@ -74,7 +81,8 @@ func TestIngesterHandleConn(t *testing.T) { }).Return(nil).AnyTimes() byteConn := &byteConn{b: bytes.NewBuffer(testPacket)} - ingester := NewIngester(mockDownsamplerAndWriter, Options{}) + ingester, err := NewIngester(mockDownsamplerAndWriter, testOptions) + require.NoError(t, err) ingester.Handle(byteConn) assertTestMetricsAreEqual(t, testMetrics, found) @@ -89,23 +97,23 @@ func TestGenerateTagsFromName(t *testing.T) { { name: "foo", expectedTags: []models.Tag{ - {Name: []byte("__$0__"), Value: []byte("foo")}, + {Name: []byte("__graphite0__"), Value: []byte("foo")}, }, }, { name: "foo.bar.baz", expectedTags: []models.Tag{ - {Name: []byte("__$0__"), Value: []byte("foo")}, - {Name: []byte("__$1__"), Value: []byte("bar")}, - {Name: []byte("__$2__"), Value: []byte("baz")}, + {Name: []byte("__graphite0__"), Value: []byte("foo")}, + {Name: []byte("__graphite1__"), Value: []byte("bar")}, + {Name: []byte("__graphite2__"), Value: []byte("baz")}, }, }, { name: "foo.bar.baz.", expectedTags: []models.Tag{ - {Name: []byte("__$0__"), Value: []byte("foo")}, - {Name: []byte("__$1__"), Value: []byte("bar")}, - {Name: []byte("__$2__"), Value: []byte("baz")}, + {Name: []byte("__graphite0__"), Value: []byte("foo")}, + {Name: []byte("__graphite1__"), Value: []byte("bar")}, + {Name: []byte("__graphite2__"), Value: []byte("baz")}, }, }, { @@ -212,4 +220,11 @@ func init() { line := []byte(fmt.Sprintf("%s %d %d\n", string(metric), i, i)) testPacket = append(testPacket, line...) 
} + + var err error + testOptions.WorkerPool, err = xsync.NewPooledWorkerPool(16, xsync.NewPooledWorkerPoolOptions()) + if err != nil { + panic(err) + } + testOptions.WorkerPool.Init() } From 9fef8ae261592fc697594128362f7dc36bf31e89 Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Thu, 24 Jan 2019 13:02:29 -0500 Subject: [PATCH 20/70] remove print --- src/query/storage/m3/storage.go | 1 - 1 file changed, 1 deletion(-) diff --git a/src/query/storage/m3/storage.go b/src/query/storage/m3/storage.go index 02785d7f16..c139bf2b10 100644 --- a/src/query/storage/m3/storage.go +++ b/src/query/storage/m3/storage.go @@ -390,7 +390,6 @@ func (s *m3storage) Write( // Special case single datapoint because it is common and we // can avoid the overhead of a waitgroup, goroutine, multierr, // iterator duplication etc. - fmt.Println("writing tags: ", query.Tags) return s.writeSingle( ctx, query, query.Datapoints[0], id, tagIterator) } From 632ba0bca465f1e19869873449e35adae515286c Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Thu, 24 Jan 2019 13:05:09 -0500 Subject: [PATCH 21/70] renable tests --- src/metrics/carbon/parser_test.go | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/src/metrics/carbon/parser_test.go b/src/metrics/carbon/parser_test.go index 914af9da67..d87b74680a 100644 --- a/src/metrics/carbon/parser_test.go +++ b/src/metrics/carbon/parser_test.go @@ -44,16 +44,16 @@ type carbonLine struct { } var testLines = []carbonLine{ - // {"foo.bar.zed", 45565.02, time.Unix(1428951394, 0), - // "foo.bar.zed 45565.02 1428951394"}, - // {"foo.bar.quad", 10.0, time.Unix(1428951394, 0), - // "foo.bar.quad 10 1428951394"}, - // {"foo.bar.nan", math.NaN(), time.Unix(1428951394, 0), - // "foo.bar.nan nan 1428951394"}, - // {"foo.bar.nan", math.NaN(), time.Unix(1428951394, 0), - // "foo.bar.nan -NaN 1428951394"}, - // {"foo.bar.negative", -18000.00000, time.Unix(1429480924, 0), - // "foo.bar.negative -18000.000000 1429480924"}, + {"foo.bar.zed", 45565.02, time.Unix(1428951394, 0), + "foo.bar.zed 45565.02 1428951394"}, + {"foo.bar.quad", 10.0, time.Unix(1428951394, 0), + "foo.bar.quad 10 1428951394"}, + {"foo.bar.nan", math.NaN(), time.Unix(1428951394, 0), + "foo.bar.nan nan 1428951394"}, + {"foo.bar.nan", math.NaN(), time.Unix(1428951394, 0), + "foo.bar.nan -NaN 1428951394"}, + {"foo.bar.negative", -18000.00000, time.Unix(1429480924, 0), + "foo.bar.negative -18000.000000 1429480924"}, {"short", 1, time.Unix(1, 0), "short 1 1"}, } From 8c1c419414cb97fbc95772ee4ce12b025eb4822f Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Thu, 24 Jan 2019 13:06:21 -0500 Subject: [PATCH 22/70] update start_m3 script --- scripts/development/m3_stack/docker-compose.yml | 2 ++ scripts/development/m3_stack/m3coordinator.yml | 3 +++ scripts/development/m3_stack/start_m3.sh | 2 +- 3 files changed, 6 insertions(+), 1 deletion(-) diff --git a/scripts/development/m3_stack/docker-compose.yml b/scripts/development/m3_stack/docker-compose.yml index 21fbbc3fb7..a4c7e63256 100644 --- a/scripts/development/m3_stack/docker-compose.yml +++ b/scripts/development/m3_stack/docker-compose.yml @@ -48,10 +48,12 @@ services: expose: - "7201" - "7203" + - "7204" - "7507" ports: - "0.0.0.0:7201:7201" - "0.0.0.0:7203:7203" + - "0.0.0.0:7204:7204" - "0.0.0.0:7507:7507" networks: - backend diff --git a/scripts/development/m3_stack/m3coordinator.yml b/scripts/development/m3_stack/m3coordinator.yml index cbaeddc5df..93a631b7ad 100644 --- a/scripts/development/m3_stack/m3coordinator.yml +++ 
b/scripts/development/m3_stack/m3coordinator.yml @@ -67,3 +67,6 @@ ingest: jitter: true handler: protobufEnabled: false + +carbon: + enabled: true diff --git a/scripts/development/m3_stack/start_m3.sh b/scripts/development/m3_stack/start_m3.sh index 0171586ac1..7cd4394a3d 100755 --- a/scripts/development/m3_stack/start_m3.sh +++ b/scripts/development/m3_stack/start_m3.sh @@ -9,7 +9,7 @@ if [[ "$FORCE_BUILD" = true ]] ; then DOCKER_ARGS="--build -d --renew-anon-volumes" fi -echo "Bringing up nodes in the backgorund with docker compose, remember to run ./stop.sh when done" +echo "Bringing up nodes in the background with docker compose, remember to run ./stop.sh when done" docker-compose -f docker-compose.yml up $DOCKER_ARGS m3coordinator01 docker-compose -f docker-compose.yml up $DOCKER_ARGS m3db_seed docker-compose -f docker-compose.yml up $DOCKER_ARGS prometheus01 From 19503ab9871311cc4a6f633b0b261c35b29a21b2 Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Thu, 24 Jan 2019 14:35:19 -0500 Subject: [PATCH 23/70] Share code among docker integration tests --- .../carbon/docker-compose.yml | 28 ++++++ .../carbon/m3coordinator.yml | 54 +++++++++++ .../docker-integration-tests/carbon/python.py | 4 + .../docker-integration-tests/carbon/test.sh | 33 +++++++ scripts/docker-integration-tests/common.sh | 96 +++++++++++++++++++ .../prometheus/test.sh | 90 +---------------- .../docker-integration-tests/simple/test.sh | 61 +----------- 7 files changed, 217 insertions(+), 149 deletions(-) create mode 100644 scripts/docker-integration-tests/carbon/docker-compose.yml create mode 100644 scripts/docker-integration-tests/carbon/m3coordinator.yml create mode 100644 scripts/docker-integration-tests/carbon/python.py create mode 100755 scripts/docker-integration-tests/carbon/test.sh diff --git a/scripts/docker-integration-tests/carbon/docker-compose.yml b/scripts/docker-integration-tests/carbon/docker-compose.yml new file mode 100644 index 0000000000..53a28f0b88 --- /dev/null +++ b/scripts/docker-integration-tests/carbon/docker-compose.yml @@ -0,0 +1,28 @@ +version: "3.5" +services: + dbnode01: + expose: + - "9000-9004" + - "2379-2380" + ports: + - "0.0.0.0:9000-9004:9000-9004" + - "0.0.0.0:2379-2380:2379-2380" + networks: + - backend + image: "m3dbnode_integration:${REVISION}" + coordinator01: + expose: + - "7201" + - "7203" + - "7204" + ports: + - "0.0.0.0:7201:7201" + - "0.0.0.0:7203:7203" + - "0.0.0.0:7204:7204" + networks: + - backend + image: "m3coordinator_integration:${REVISION}" + volumes: + - "./:/etc/m3coordinator/" +networks: + backend: diff --git a/scripts/docker-integration-tests/carbon/m3coordinator.yml b/scripts/docker-integration-tests/carbon/m3coordinator.yml new file mode 100644 index 0000000000..b015079adb --- /dev/null +++ b/scripts/docker-integration-tests/carbon/m3coordinator.yml @@ -0,0 +1,54 @@ +listenAddress: + type: "config" + value: "0.0.0.0:7201" + +metrics: + scope: + prefix: "coordinator" + prometheus: + handlerPath: /metrics + listenAddress: 0.0.0.0:7203 # until https://github.com/m3db/m3/issues/682 is resolved + sanitization: prometheus + samplingRate: 1.0 + extended: none + +clusters: + - namespaces: + - namespace: agg + type: aggregated + retention: 10h + resolution: 15s + - namespace: unagg + type: unaggregated + retention: 10m + client: + config: + service: + env: default_env + zone: embedded + service: m3db + cacheDir: /var/lib/m3kv + etcdClusters: + - zone: embedded + endpoints: + - dbnode01:2379 + writeConsistencyLevel: majority + readConsistencyLevel: 
unstrict_majority + writeTimeout: 10s + fetchTimeout: 15s + connectTimeout: 20s + writeRetry: + initialBackoff: 500ms + backoffFactor: 3 + maxRetries: 2 + jitter: true + fetchRetry: + initialBackoff: 500ms + backoffFactor: 2 + maxRetries: 3 + jitter: true + backgroundHealthCheckFailLimit: 4 + backgroundHealthCheckFailThrottleFactor: 0.5 + +carbon: + enabled: true \ No newline at end of file diff --git a/scripts/docker-integration-tests/carbon/python.py b/scripts/docker-integration-tests/carbon/python.py new file mode 100644 index 0000000000..3376001c57 --- /dev/null +++ b/scripts/docker-integration-tests/carbon/python.py @@ -0,0 +1,4 @@ +import pipes +rawstring = r's=$(date +%s); curl -sSfg "http://localhost:7201/api/v1/query_range?start=$(( s - 300))&end=$s&step=10&query={__graphite0__='foo',__graphite1__='bar',__graphite2__='baz'}" | jq ".data.result[0].values[][1]==\"1\"" | grep -m 1 true' + +print pipes.quote(rawstring) \ No newline at end of file diff --git a/scripts/docker-integration-tests/carbon/test.sh b/scripts/docker-integration-tests/carbon/test.sh new file mode 100755 index 0000000000..0fdf580c3a --- /dev/null +++ b/scripts/docker-integration-tests/carbon/test.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env bash + +set -xe + +source $GOPATH/src/github.com/m3db/m3/scripts/docker-integration-tests/common.sh +REVISION=$(git rev-parse HEAD) +COMPOSE_FILE=$GOPATH/src/github.com/m3db/m3/scripts/docker-integration-tests/carbon/docker-compose.yml +export REVISION + +echo "Run m3dbnode and m3coordinator containers" +docker-compose -f ${COMPOSE_FILE} up -d dbnode01 +docker-compose -f ${COMPOSE_FILE} up -d coordinator01 + +# think of this as a defer func() in golang +function defer { + docker-compose -f ${COMPOSE_FILE} down || echo "unable to shutdown containers" # CI fails to stop all containers sometimes +} +trap defer EXIT + +setup_single_m3db_node + +echo "Writing out a carbon metric" +echo "foo.bar.baz 1 `date +%s`" | nc 0.0.0.0 7204 + +echo "Attempting to read carbon metric back" +function read_carbon { + end=$(date +%s) + start=$(($end-300)) + RESPONSE=$(curl -sSfg "http://localhost:7201/api/v1/query_range?start=$start&end=$end&step=10&query={__graphite0__='foo',__graphite1__='bar',__graphite2__='baz'}") + echo "$RESPONSE" | jq '.data.result[0].values[][1]=="1"' | grep -q "true" + return $? 
+} +ATTEMPTS=10 TIMEOUT=1 retry_with_backoff read_carbon diff --git a/scripts/docker-integration-tests/common.sh b/scripts/docker-integration-tests/common.sh index 0e22a0d0ef..93dca8535d 100644 --- a/scripts/docker-integration-tests/common.sh +++ b/scripts/docker-integration-tests/common.sh @@ -36,3 +36,99 @@ function retry_with_backoff { return $exitCode } + +function setup_single_m3db_node { + wait_for_db_init +} + +function wait_for_db_init { + echo "Sleeping for a bit to ensure db up" + sleep 15 # TODO Replace sleeps with logic to determine when to proceed + + echo "Adding namespace" + curl -vvvsSf -X POST 0.0.0.0:7201/api/v1/namespace -d '{ + "name": "agg", + "options": { + "bootstrapEnabled": true, + "flushEnabled": true, + "writesToCommitLog": true, + "cleanupEnabled": true, + "snapshotEnabled": true, + "repairEnabled": false, + "retentionOptions": { + "retentionPeriodNanos": 172800000000000, + "blockSizeNanos": 7200000000000, + "bufferFutureNanos": 600000000000, + "bufferPastNanos": 600000000000, + "blockDataExpiry": true, + "blockDataExpiryAfterNotAccessPeriodNanos": 300000000000 + }, + "indexOptions": { + "enabled": true, + "blockSizeNanos": 7200000000000 + } + } + }' + + echo "Sleep until namespace is init'd" + ATTEMPTS=4 TIMEOUT=1 retry_with_backoff \ + '[ "$(curl -sSf 0.0.0.0:7201/api/v1/namespace | jq .registry.namespaces.agg.indexOptions.enabled)" == true ]' + + curl -vvvsSf -X POST 0.0.0.0:7201/api/v1/namespace -d '{ + "name": "unagg", + "options": { + "bootstrapEnabled": true, + "flushEnabled": true, + "writesToCommitLog": true, + "cleanupEnabled": true, + "snapshotEnabled": true, + "repairEnabled": false, + "retentionOptions": { + "retentionPeriodNanos": 172800000000000, + "blockSizeNanos": 7200000000000, + "bufferFutureNanos": 600000000000, + "bufferPastNanos": 600000000000, + "blockDataExpiry": true, + "blockDataExpiryAfterNotAccessPeriodNanos": 300000000000 + }, + "indexOptions": { + "enabled": true, + "blockSizeNanos": 7200000000000 + } + } + }' + + echo "Sleep until namespace is init'd" + ATTEMPTS=4 TIMEOUT=1 retry_with_backoff \ + '[ "$(curl -sSf 0.0.0.0:7201/api/v1/namespace | jq .registry.namespaces.unagg.indexOptions.enabled)" == true ]' + + echo "Placement initialization" + curl -vvvsSf -X POST 0.0.0.0:7201/api/v1/placement/init -d '{ + "num_shards": 64, + "replication_factor": 1, + "instances": [ + { + "id": "m3db_local", + "isolation_group": "rack-a", + "zone": "embedded", + "weight": 1024, + "endpoint": "dbnode01:9000", + "hostname": "dbnode01", + "port": 9000 + } + ] + }' + + echo "Sleep until placement is init'd" + ATTEMPTS=4 TIMEOUT=1 retry_with_backoff \ + '[ "$(curl -sSf 0.0.0.0:7201/api/v1/placement | jq .placement.instances.m3db_local.id)" == \"m3db_local\" ]' + + echo "Sleep until bootstrapped" + ATTEMPTS=10 TIMEOUT=2 retry_with_backoff \ + '[ "$(curl -sSf 0.0.0.0:9002/health | jq .bootstrapped)" == true ]' + + echo "Waiting until shards are marked as available" + ATTEMPTS=10 TIMEOUT=1 retry_with_backoff \ + '[ "$(curl -sSf 0.0.0.0:7201/api/v1/placement | grep -c INITIALIZING)" -eq 0 ]' +} + diff --git a/scripts/docker-integration-tests/prometheus/test.sh b/scripts/docker-integration-tests/prometheus/test.sh index 2e7acd1b29..04362a8911 100755 --- a/scripts/docker-integration-tests/prometheus/test.sh +++ b/scripts/docker-integration-tests/prometheus/test.sh @@ -17,95 +17,7 @@ function defer { } trap defer EXIT - -echo "Sleeping for a bit to ensure db up" -sleep 15 # TODO Replace sleeps with logic to determine when to proceed - -echo "Adding namespace" 
-curl -vvvsSf -X POST 0.0.0.0:7201/api/v1/namespace -d '{ - "name": "agg", - "options": { - "bootstrapEnabled": true, - "flushEnabled": true, - "writesToCommitLog": true, - "cleanupEnabled": true, - "snapshotEnabled": true, - "repairEnabled": false, - "retentionOptions": { - "retentionPeriodNanos": 172800000000000, - "blockSizeNanos": 7200000000000, - "bufferFutureNanos": 600000000000, - "bufferPastNanos": 600000000000, - "blockDataExpiry": true, - "blockDataExpiryAfterNotAccessPeriodNanos": 300000000000 - }, - "indexOptions": { - "enabled": true, - "blockSizeNanos": 7200000000000 - } - } -}' - -echo "Sleep until namespace is init'd" -ATTEMPTS=4 TIMEOUT=1 retry_with_backoff \ - '[ "$(curl -sSf 0.0.0.0:7201/api/v1/namespace | jq .registry.namespaces.agg.indexOptions.enabled)" == true ]' - -curl -vvvsSf -X POST 0.0.0.0:7201/api/v1/namespace -d '{ - "name": "unagg", - "options": { - "bootstrapEnabled": true, - "flushEnabled": true, - "writesToCommitLog": true, - "cleanupEnabled": true, - "snapshotEnabled": true, - "repairEnabled": false, - "retentionOptions": { - "retentionPeriodNanos": 172800000000000, - "blockSizeNanos": 7200000000000, - "bufferFutureNanos": 600000000000, - "bufferPastNanos": 600000000000, - "blockDataExpiry": true, - "blockDataExpiryAfterNotAccessPeriodNanos": 300000000000 - }, - "indexOptions": { - "enabled": true, - "blockSizeNanos": 7200000000000 - } - } -}' - -echo "Sleep until namespace is init'd" -ATTEMPTS=4 TIMEOUT=1 retry_with_backoff \ - '[ "$(curl -sSf 0.0.0.0:7201/api/v1/namespace | jq .registry.namespaces.unagg.indexOptions.enabled)" == true ]' - -echo "Placement initialization" -curl -vvvsSf -X POST 0.0.0.0:7201/api/v1/placement/init -d '{ - "num_shards": 64, - "replication_factor": 1, - "instances": [ - { - "id": "m3db_local", - "isolation_group": "rack-a", - "zone": "embedded", - "weight": 1024, - "endpoint": "dbnode01:9000", - "hostname": "dbnode01", - "port": 9000 - } - ] -}' - -echo "Sleep until placement is init'd" -ATTEMPTS=4 TIMEOUT=1 retry_with_backoff \ - '[ "$(curl -sSf 0.0.0.0:7201/api/v1/placement | jq .placement.instances.m3db_local.id)" == \"m3db_local\" ]' - -echo "Sleep until bootstrapped" -ATTEMPTS=10 TIMEOUT=2 retry_with_backoff \ - '[ "$(curl -sSf 0.0.0.0:9002/health | jq .bootstrapped)" == true ]' - -echo "Waiting until shards are marked as available" -ATTEMPTS=10 TIMEOUT=1 retry_with_backoff \ - '[ "$(curl -sSf 0.0.0.0:7201/api/v1/placement | grep -c INITIALIZING)" -eq 0 ]' +setup_single_m3db_node echo "Start Prometheus containers" docker-compose -f ${COMPOSE_FILE} up -d prometheus01 diff --git a/scripts/docker-integration-tests/simple/test.sh b/scripts/docker-integration-tests/simple/test.sh index ade8f772a8..a7ee35a9bb 100755 --- a/scripts/docker-integration-tests/simple/test.sh +++ b/scripts/docker-integration-tests/simple/test.sh @@ -24,66 +24,7 @@ if [ $? 
-ne 0 ]; then exit 1 fi -echo "Sleeping for a bit to ensure db up" -sleep 5 # TODO Replace sleeps with logic to determine when to proceed - -echo "Adding namespace" -curl -vvvsSf -X POST 0.0.0.0:7201/api/v1/namespace -d '{ - "name": "default", - "options": { - "bootstrapEnabled": true, - "flushEnabled": true, - "writesToCommitLog": true, - "cleanupEnabled": true, - "snapshotEnabled": true, - "repairEnabled": false, - "retentionOptions": { - "retentionPeriodNanos": 172800000000000, - "blockSizeNanos": 7200000000000, - "bufferFutureNanos": 600000000000, - "bufferPastNanos": 600000000000, - "blockDataExpiry": true, - "blockDataExpiryAfterNotAccessPeriodNanos": 300000000000 - }, - "indexOptions": { - "enabled": true, - "blockSizeNanos": 7200000000000 - } - } -}' - -echo "Sleep until namespace is init'd" -ATTEMPTS=4 TIMEOUT=1 retry_with_backoff \ - '[ "$(curl -sSf 0.0.0.0:7201/api/v1/namespace | jq .registry.namespaces.default.indexOptions.enabled)" == true ]' - -echo "Placement initialization" -curl -vvvsSf -X POST 0.0.0.0:7201/api/v1/placement/init -d '{ - "num_shards": 64, - "replication_factor": 1, - "instances": [ - { - "id": "m3db_local", - "isolation_group": "rack-a", - "zone": "embedded", - "weight": 1024, - "endpoint": "127.0.0.1:9000", - "hostname": "127.0.0.1", - "port": 9000 - } - ] -}' - -echo "Sleep until placement is init'd" -ATTEMPTS=4 TIMEOUT=1 retry_with_backoff \ - '[ "$(curl -sSf 0.0.0.0:7201/api/v1/placement | jq .placement.instances.m3db_local.id)" == \"m3db_local\" ]' - -echo "Sleep until bootstrapped" -ATTEMPTS=10 TIMEOUT=2 retry_with_backoff \ - '[ "$(curl -sSf 0.0.0.0:9002/health | jq .bootstrapped)" == true ]' - -echo "Waiting until shards are marked as available" -ATTEMPTS=10 TIMEOUT=1 retry_with_backoff \ - '[ "$(curl -sSf 0.0.0.0:7201/api/v1/placement | grep -c INITIALIZING)" -eq 0 ]' +setup_single_m3db_node echo "Write data" curl -vvvsS -X POST 0.0.0.0:9003/writetagged -d '{ From f229bb75b0c179c304c504cbcf6e7bae019d1302 Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Thu, 24 Jan 2019 14:35:30 -0500 Subject: [PATCH 24/70] Add new docker integration test to makefile --- Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/Makefile b/Makefile index d234934883..294e8aea64 100644 --- a/Makefile +++ b/Makefile @@ -200,6 +200,7 @@ docker-integration-test: @./scripts/docker-integration-tests/setup.sh @./scripts/docker-integration-tests/simple/test.sh @./scripts/docker-integration-tests/prometheus/test.sh + @./scripts/docker-integration-tests/carbon/test.sh .PHONY: site-build site-build: From d2c4b5124be29df5231a644ea3d8a7d65dc54c71 Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Thu, 24 Jan 2019 15:05:43 -0500 Subject: [PATCH 25/70] fix broken test --- src/query/api/v1/httpd/handler_test.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/query/api/v1/httpd/handler_test.go b/src/query/api/v1/httpd/handler_test.go index f9544b44a9..8304b176d1 100644 --- a/src/query/api/v1/httpd/handler_test.go +++ b/src/query/api/v1/httpd/handler_test.go @@ -27,6 +27,7 @@ import ( "testing" "time" + "github.com/m3db/m3/src/cmd/services/m3coordinator/ingest" "github.com/m3db/m3/src/cmd/services/m3query/config" m3json "github.com/m3db/m3/src/query/api/v1/handler/json" "github.com/m3db/m3/src/query/api/v1/handler/prometheus/native" @@ -48,7 +49,8 @@ func makeTagOptions() models.TagOptions { } func setupHandler(store storage.Storage) (*Handler, error) { - return NewHandler(store, makeTagOptions(), nil, 
executor.NewEngine(store, tally.NewTestScope("test", nil)), nil, nil, + downsamplerAndWriter := ingest.NewDownsamplerAndWriter(store, nil) + return NewHandler(downsamplerAndWriter, makeTagOptions(), executor.NewEngine(store, tally.NewTestScope("test", nil)), nil, nil, config.Configuration{}, nil, tally.NewTestScope("", nil)) } From d219402eea64651e3d4f26951216f324edea944f Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Thu, 24 Jan 2019 15:07:45 -0500 Subject: [PATCH 26/70] fix imports --- src/cmd/services/m3coordinator/ingest/carbon/ingest_test.go | 1 + src/cmd/services/m3coordinator/ingest/write_test.go | 6 +++--- src/query/api/v1/handler/prometheus/remote/write_test.go | 2 +- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/src/cmd/services/m3coordinator/ingest/carbon/ingest_test.go b/src/cmd/services/m3coordinator/ingest/carbon/ingest_test.go index e0feadc22c..66748431d7 100644 --- a/src/cmd/services/m3coordinator/ingest/carbon/ingest_test.go +++ b/src/cmd/services/m3coordinator/ingest/carbon/ingest_test.go @@ -39,6 +39,7 @@ import ( "github.com/m3db/m3x/instrument" xsync "github.com/m3db/m3x/sync" xtime "github.com/m3db/m3x/time" + "github.com/stretchr/testify/require" ) diff --git a/src/cmd/services/m3coordinator/ingest/write_test.go b/src/cmd/services/m3coordinator/ingest/write_test.go index 42a550f6cb..b34f9231b3 100644 --- a/src/cmd/services/m3coordinator/ingest/write_test.go +++ b/src/cmd/services/m3coordinator/ingest/write_test.go @@ -25,15 +25,15 @@ import ( "testing" "time" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" - "github.com/m3db/m3/src/cmd/services/m3coordinator/downsample" "github.com/m3db/m3/src/dbnode/client" "github.com/m3db/m3/src/query/models" "github.com/m3db/m3/src/query/test/m3" "github.com/m3db/m3/src/query/ts" xtime "github.com/m3db/m3x/time" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" ) var ( diff --git a/src/query/api/v1/handler/prometheus/remote/write_test.go b/src/query/api/v1/handler/prometheus/remote/write_test.go index 586fc9cc73..83e35fe83a 100644 --- a/src/query/api/v1/handler/prometheus/remote/write_test.go +++ b/src/query/api/v1/handler/prometheus/remote/write_test.go @@ -27,13 +27,13 @@ import ( "testing" "time" - "github.com/golang/mock/gomock" "github.com/m3db/m3/src/cmd/services/m3coordinator/ingest" "github.com/m3db/m3/src/dbnode/x/metrics" "github.com/m3db/m3/src/query/api/v1/handler/prometheus/remote/test" "github.com/m3db/m3/src/query/util/logging" xclock "github.com/m3db/m3x/clock" + "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" "github.com/uber-go/tally" ) From eabf575df1ac4ac2b1e92db8c08a6c0bd4edb1da Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Thu, 24 Jan 2019 15:10:06 -0500 Subject: [PATCH 27/70] making writing to unaggregated optional --- .../services/m3coordinator/ingest/write.go | 79 +++++++++++-------- 1 file changed, 44 insertions(+), 35 deletions(-) diff --git a/src/cmd/services/m3coordinator/ingest/write.go b/src/cmd/services/m3coordinator/ingest/write.go index 648215bbca..924a75814d 100644 --- a/src/cmd/services/m3coordinator/ingest/write.go +++ b/src/cmd/services/m3coordinator/ingest/write.go @@ -108,14 +108,19 @@ func (d *downsamplerAndWriter) Write( appender.Finalize() } - return d.store.Write(ctx, &storage.WriteQuery{ - Tags: tags, - 
Datapoints: datapoints, - Unit: unit, - Attributes: storage.Attributes{ - MetricsType: storage.UnaggregatedMetricsType, - }, - }) + if d.store != nil { + return d.store.Write(ctx, &storage.WriteQuery{ + Tags: tags, + Datapoints: datapoints, + Unit: unit, + Attributes: storage.Attributes{ + MetricsType: storage.UnaggregatedMetricsType, + }, + }) + + } + + return nil } func (d *downsamplerAndWriter) WriteBatch( @@ -158,35 +163,39 @@ func (d *downsamplerAndWriter) WriteBatch( } } - // Write unaggregated. - var ( - wg = &sync.WaitGroup{} - errLock sync.Mutex - multiErr xerrors.MultiError - ) - for iter.Next() { - wg.Add(1) - tags, datapoints, unit := iter.Current() - go func() { - err := d.store.Write(ctx, &storage.WriteQuery{ - Tags: tags, - Datapoints: datapoints, - Unit: unit, - Attributes: storage.Attributes{ - MetricsType: storage.UnaggregatedMetricsType, - }, - }) - if err != nil { - errLock.Lock() - multiErr = multiErr.Add(err) - errLock.Unlock() - } - wg.Done() - }() + if d.store != nil { + // Write unaggregated. + var ( + wg = &sync.WaitGroup{} + errLock sync.Mutex + multiErr xerrors.MultiError + ) + for iter.Next() { + wg.Add(1) + tags, datapoints, unit := iter.Current() + go func() { + err := d.store.Write(ctx, &storage.WriteQuery{ + Tags: tags, + Datapoints: datapoints, + Unit: unit, + Attributes: storage.Attributes{ + MetricsType: storage.UnaggregatedMetricsType, + }, + }) + if err != nil { + errLock.Lock() + multiErr = multiErr.Add(err) + errLock.Unlock() + } + wg.Done() + }() + } + + wg.Wait() + return multiErr.LastError() } - wg.Wait() - return multiErr.LastError() + return nil } func (d *downsamplerAndWriter) Storage() storage.Storage { From 9c84978b41cadb6d6ca1598678a7bda894838fe5 Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Thu, 24 Jan 2019 15:11:45 -0500 Subject: [PATCH 28/70] disable unaggregated writes in carbon ingester --- src/query/server/server.go | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/query/server/server.go b/src/query/server/server.go index d1ed63f21e..8d22eb3672 100644 --- a/src/query/server/server.go +++ b/src/query/server/server.go @@ -207,8 +207,8 @@ func Run(runOpts RunOptions) { engine := executor.NewEngine(backendStorage, scope.SubScope("engine")) - downsamplerAndWriter := ingest.NewDownsamplerAndWriter(backendStorage, downsampler) - handler, err := httpd.NewHandler(downsamplerAndWriter, tagOptions, engine, + promDownsamplerAndWriter := ingest.NewDownsamplerAndWriter(backendStorage, downsampler) + handler, err := httpd.NewHandler(promDownsamplerAndWriter, tagOptions, engine, m3dbClusters, clusterClient, cfg, runOpts.DBConfig, scope) if err != nil { logger.Fatal("unable to set up handlers", zap.Error(err)) @@ -293,7 +293,10 @@ func Run(runOpts RunOptions) { } workerPool.Init() - ingester, err := ingestcarbon.NewIngester(downsamplerAndWriter, ingestcarbon.Options{ + // Create a new downsampler and writer because we don't want the carbon ingester to write + // any data unaggregated, so we pass nil for storage.Storage. 
+ carbonIngestDownsamplerAndWriter := ingest.NewDownsamplerAndWriter(nil, downsampler) + ingester, err := ingestcarbon.NewIngester(carbonIngestDownsamplerAndWriter, ingestcarbon.Options{ InstrumentOptions: carbonIOpts, WorkerPool: workerPool, }) From 977b4256c5b00f4c1c87583537bfc91b4444a106 Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Thu, 24 Jan 2019 15:13:34 -0500 Subject: [PATCH 29/70] fix typo --- src/cmd/services/m3coordinator/ingest/carbon/ingest.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cmd/services/m3coordinator/ingest/carbon/ingest.go b/src/cmd/services/m3coordinator/ingest/carbon/ingest.go index 612ed80d79..8ef756f884 100644 --- a/src/cmd/services/m3coordinator/ingest/carbon/ingest.go +++ b/src/cmd/services/m3coordinator/ingest/carbon/ingest.go @@ -121,7 +121,7 @@ func (i *ingester) Handle(conn net.Conn) { logger.Debugf("waiting for outstanding carbon ingestion writes to complete") wg.Wait() - logger.Debugf("all outstanding writes completed, shuttin down carbon ingestion handler") + logger.Debugf("all outstanding writes completed, shutting down carbon ingestion handler") // Don't close the connection, that is the server's responsibility. } From 7b14c97943f2a7440e5a8eb9a62ad8f53d216655 Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Thu, 24 Jan 2019 15:20:09 -0500 Subject: [PATCH 30/70] refactor interfaces --- src/cmd/services/m3coordinator/ingest/carbon/ingest.go | 10 ++++------ .../ingest/carbon/ingest_benchmark_test.go | 2 +- .../m3coordinator/ingest/carbon/ingest_test.go | 4 ++-- src/metrics/carbon/parser.go | 9 +++++++-- src/metrics/carbon/parser_test.go | 5 ++++- 5 files changed, 18 insertions(+), 12 deletions(-) diff --git a/src/cmd/services/m3coordinator/ingest/carbon/ingest.go b/src/cmd/services/m3coordinator/ingest/carbon/ingest.go index 8ef756f884..f466533bb7 100644 --- a/src/cmd/services/m3coordinator/ingest/carbon/ingest.go +++ b/src/cmd/services/m3coordinator/ingest/carbon/ingest.go @@ -75,10 +75,11 @@ type ingester struct { opts Options } +// TODO: Emit metrics func (i *ingester) Handle(conn net.Conn) { var ( wg = sync.WaitGroup{} - s = carbon.NewScanner(conn) + s = carbon.NewScanner(conn, i.opts.InstrumentOptions) logger = i.opts.InstrumentOptions.Logger() ) @@ -93,7 +94,7 @@ func (i *ingester) Handle(conn net.Conn) { i.opts.WorkerPool.Go(func() { datapoints := []ts.Datapoint{{Timestamp: timestamp, Value: value}} // TODO: Pool. 
- tags, err := generateTagsFromName(name) + tags, err := GenerateTagsFromName(name) if err != nil { logger.Errorf("err generating tags from carbon name: %s, err: %s", string(name), err) @@ -110,9 +111,6 @@ func (i *ingester) Handle(conn net.Conn) { wg.Done() }) - - // i.metrics.malformedCounter.Inc(int64(s.MalformedCount)) - s.MalformedCount = 0 } if err := s.Err(); err != nil { @@ -145,7 +143,7 @@ type carbonHandlerMetrics struct { readTimeLatency tally.Timer } -func generateTagsFromName(name []byte) (models.Tags, error) { +func GenerateTagsFromName(name []byte) (models.Tags, error) { if len(name) == 0 { return models.Tags{}, errCannotGenerateTagsFromEmptyName } diff --git a/src/cmd/services/m3coordinator/ingest/carbon/ingest_benchmark_test.go b/src/cmd/services/m3coordinator/ingest/carbon/ingest_benchmark_test.go index 2d48c87879..082dba077c 100644 --- a/src/cmd/services/m3coordinator/ingest/carbon/ingest_benchmark_test.go +++ b/src/cmd/services/m3coordinator/ingest/carbon/ingest_benchmark_test.go @@ -35,7 +35,7 @@ func BenchmarkGenerateTagsFromName(b *testing.B) { ) for i := 0; i < b.N; i++ { - benchmarkGenerateTagsSink, err = generateTagsFromName(testName) + benchmarkGenerateTagsSink, err = GenerateTagsFromName(testName) if err != nil { panic(err) } diff --git a/src/cmd/services/m3coordinator/ingest/carbon/ingest_test.go b/src/cmd/services/m3coordinator/ingest/carbon/ingest_test.go index 66748431d7..fc2ed386b6 100644 --- a/src/cmd/services/m3coordinator/ingest/carbon/ingest_test.go +++ b/src/cmd/services/m3coordinator/ingest/carbon/ingest_test.go @@ -128,7 +128,7 @@ func TestGenerateTagsFromName(t *testing.T) { } for _, tc := range testCases { - tags, err := generateTagsFromName([]byte(tc.name)) + tags, err := GenerateTagsFromName([]byte(tc.name)) if tc.expectedErr != nil { require.Equal(t, tc.expectedErr, err) } else { @@ -207,7 +207,7 @@ func init() { for i := 0; i < numLinesInTestPacket; i++ { metric := []byte(fmt.Sprintf("test_metric_%d", i)) - tags, err := generateTagsFromName(metric) + tags, err := GenerateTagsFromName(metric) if err != nil { panic(err) } diff --git a/src/metrics/carbon/parser.go b/src/metrics/carbon/parser.go index f7381aeed7..ade1ebc4c3 100644 --- a/src/metrics/carbon/parser.go +++ b/src/metrics/carbon/parser.go @@ -31,6 +31,7 @@ import ( "time" "unicode/utf8" + "github.com/m3db/m3x/instrument" "github.com/m3db/m3x/unsafe" ) @@ -229,10 +230,12 @@ type Scanner struct { // The number of malformed metrics encountered. MalformedCount int + + iOpts instrument.Options } // NewScanner creates a new carbon scanner. 
-func NewScanner(r io.Reader) *Scanner { +func NewScanner(r io.Reader, iOpts instrument.Options) *Scanner { s := bufio.NewScanner(r) s.Split(bufio.ScanLines) return &Scanner{scanner: s} @@ -248,7 +251,9 @@ func (s *Scanner) Scan() bool { var err error if s.path, s.timestamp, s.value, err = Parse(s.scanner.Bytes()); err != nil { // TODO: Convert to log - fmt.Printf("malformed: %s, err: %s\n", s.path, err.Error()) + s.iOpts.Logger().Errorf( + "error trying to scan malformed carbon line: %s, err: %s", + string(s.path), err.Error()) s.MalformedCount++ continue } diff --git a/src/metrics/carbon/parser_test.go b/src/metrics/carbon/parser_test.go index d87b74680a..193cb2c43b 100644 --- a/src/metrics/carbon/parser_test.go +++ b/src/metrics/carbon/parser_test.go @@ -28,12 +28,15 @@ import ( "testing" "time" + "github.com/m3db/m3x/instrument" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) var ( nullTime time.Time + + testIOpts = instrument.NewOptions() ) type carbonLine struct { @@ -64,7 +67,7 @@ func TestScannerMetric(t *testing.T) { fmt.Fprintf(&buf, "%s\n", line.line) } - s := NewScanner(&buf) + s := NewScanner(&buf, testIOpts) for _, line := range testLines { t.Run(line.path, func(t *testing.T) { require.True(t, s.Scan(), "could not parse to line %s, err: %v", line.line, s.Err()) From 5f6c303d9e0d7229043b42bb810882d00023e9e1 Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Thu, 24 Jan 2019 15:23:09 -0500 Subject: [PATCH 31/70] remove todo --- src/cmd/services/m3coordinator/ingest/carbon/ingest.go | 8 ++++---- src/metrics/carbon/parser.go | 1 - 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/src/cmd/services/m3coordinator/ingest/carbon/ingest.go b/src/cmd/services/m3coordinator/ingest/carbon/ingest.go index f466533bb7..35d42d7ddb 100644 --- a/src/cmd/services/m3coordinator/ingest/carbon/ingest.go +++ b/src/cmd/services/m3coordinator/ingest/carbon/ingest.go @@ -75,7 +75,7 @@ type ingester struct { opts Options } -// TODO: Emit metrics +// TODO(rartoul): Emit metrics func (i *ingester) Handle(conn net.Conn) { var ( wg = sync.WaitGroup{} @@ -87,13 +87,13 @@ func (i *ingester) Handle(conn net.Conn) { for s.Scan() { name, timestamp, value := s.Metric() // Copy name since scanner bytes are recycled. - // TODO: Pool. + // TODO(rartoul): Pool. name = append([]byte(nil), name...) wg.Add(1) i.opts.WorkerPool.Go(func() { datapoints := []ts.Datapoint{{Timestamp: timestamp, Value: value}} - // TODO: Pool. + // TODO(rartoul): Pool. tags, err := GenerateTagsFromName(name) if err != nil { logger.Errorf("err generating tags from carbon name: %s, err: %s", @@ -101,7 +101,7 @@ func (i *ingester) Handle(conn net.Conn) { return } - // TODO: Real context? + // TODO(rartoul): Context with timeout? 
err = i.downsamplerAndWriter.Write( context.Background(), tags, datapoints, xtime.Second) if err != nil { diff --git a/src/metrics/carbon/parser.go b/src/metrics/carbon/parser.go index ade1ebc4c3..06558a14d1 100644 --- a/src/metrics/carbon/parser.go +++ b/src/metrics/carbon/parser.go @@ -250,7 +250,6 @@ func (s *Scanner) Scan() bool { var err error if s.path, s.timestamp, s.value, err = Parse(s.scanner.Bytes()); err != nil { - // TODO: Convert to log s.iOpts.Logger().Errorf( "error trying to scan malformed carbon line: %s, err: %s", string(s.path), err.Error()) From 9ff3a6a4d17b78ee89fbdbfd9d2bd3a71f6d66f3 Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Thu, 24 Jan 2019 15:23:57 -0500 Subject: [PATCH 32/70] refactor metrics struct --- src/cmd/services/m3coordinator/ingest/carbon/ingest.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/cmd/services/m3coordinator/ingest/carbon/ingest.go b/src/cmd/services/m3coordinator/ingest/carbon/ingest.go index 35d42d7ddb..2feabd424c 100644 --- a/src/cmd/services/m3coordinator/ingest/carbon/ingest.go +++ b/src/cmd/services/m3coordinator/ingest/carbon/ingest.go @@ -128,12 +128,11 @@ func (i *ingester) Close() { // We don't maintain any state in-between connections so there is nothing to do here. } -func newCarbonHandlerMetrics(m tally.Scope) carbonHandlerMetrics { +func newCarbonIngesterMetrics(m tally.Scope) carbonHandlerMetrics { writesScope := m.SubScope("writes") return carbonHandlerMetrics{ unresolvedIDs: writesScope.Counter("ids-policy-unresolved"), malformedCounter: writesScope.Counter("malformed"), - readTimeLatency: writesScope.Timer("read-time-latency"), } } From 0607d6c657f4ed0f0869482661acf81dc035db69 Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Thu, 24 Jan 2019 15:25:20 -0500 Subject: [PATCH 33/70] add docstring --- src/cmd/services/m3coordinator/ingest/carbon/ingest.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/cmd/services/m3coordinator/ingest/carbon/ingest.go b/src/cmd/services/m3coordinator/ingest/carbon/ingest.go index 2feabd424c..d96b7ab0e7 100644 --- a/src/cmd/services/m3coordinator/ingest/carbon/ingest.go +++ b/src/cmd/services/m3coordinator/ingest/carbon/ingest.go @@ -142,6 +142,13 @@ type carbonHandlerMetrics struct { readTimeLatency tally.Timer } +// GenerateTagsFromName accepts a carbon metric name and blows it up into a list of +// key-value pair tags such that an input like: +// foo.bar.baz +// becomes +// __graphite0__:foo +// __graphite1__:bar +// __graphite2__:baz func GenerateTagsFromName(name []byte) (models.Tags, error) { if len(name) == 0 { return models.Tags{}, errCannotGenerateTagsFromEmptyName From 74bc426567488556537b35af3e76b238db608539 Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Thu, 24 Jan 2019 15:42:48 -0500 Subject: [PATCH 34/70] make prom TS iter more efficient --- .../api/v1/handler/prometheus/remote/write.go | 40 ++++++++++++------- 1 file changed, 25 insertions(+), 15 deletions(-) diff --git a/src/query/api/v1/handler/prometheus/remote/write.go b/src/query/api/v1/handler/prometheus/remote/write.go index 441c93e102..4c177b6e03 100644 --- a/src/query/api/v1/handler/prometheus/remote/write.go +++ b/src/query/api/v1/handler/prometheus/remote/write.go @@ -125,36 +125,46 @@ func (h *PromWriteHandler) parseRequest(r *http.Request) (*prompb.WriteRequest, } func (h *PromWriteHandler) write(ctx context.Context, r *prompb.WriteRequest) error { - iter := newPromTSIter(r.Timeseries) + iter := newPromTSIter(r.Timeseries, h.tagOptions) return 
h.downsamplerAndWriter.WriteBatch(ctx, iter) } -type promTSIter struct { - idx int - timeseries []*prompb.TimeSeries +func newPromTSIter(timeseries []*prompb.TimeSeries, tagOpts models.TagOptions) *promTSIter { + // Construct the tags and datapoints upfront so that if the iterator + // is reset, we don't have to generate them twice. + var ( + tags = make([]models.Tags, 0, len(timeseries)) + datapoints = make([]ts.Datapoints, 0, len(timeseries)) + ) + for _, promTS := range timeseries { + tags = append(tags, storage.PromLabelsToM3Tags(promTS.Labels, tagOpts)) + datapoints = append(datapoints, storage.PromSamplesToM3Datapoints(promTS.Samples)) + } + + return &promTSIter{ + idx: -1, + tags: tags, + datapoints: datapoints, + } } -func newPromTSIter(timeseries []*prompb.TimeSeries) *promTSIter { - return &promTSIter{idx: -1, timeseries: timeseries} +type promTSIter struct { + idx int + tags []models.Tags + datapoints []ts.Datapoints } func (i *promTSIter) Next() bool { i.idx++ - return i.idx < len(i.timeseries) + return i.idx < len(i.tags) } func (i *promTSIter) Current() (models.Tags, ts.Datapoints, xtime.Unit) { - if len(i.timeseries) == 0 || i.idx < 0 || i.idx >= len(i.timeseries) { + if len(i.tags) == 0 || i.idx < 0 || i.idx >= len(i.tags) { return models.Tags{}, nil, 0 } - curr := i.timeseries[i.idx] - // TODO: Add a storage function for this? - tags := make([]models.Tag, 0, len(curr.Labels)) - for _, label := range curr.Labels { - tags = append(tags, models.Tag{Name: label.Name, Value: label.Value}) - } - return models.Tags{Tags: tags}, storage.PromSamplesToM3Datapoints(curr.Samples), xtime.Millisecond + return i.tags[i.idx], i.datapoints[i.idx], xtime.Millisecond } func (i *promTSIter) Reset() error { From be483c92910229adf06db426b82dedd459c214d4 Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Thu, 24 Jan 2019 15:48:31 -0500 Subject: [PATCH 35/70] refactor write method --- .../services/m3coordinator/ingest/write.go | 83 ++++++++++--------- 1 file changed, 43 insertions(+), 40 deletions(-) diff --git a/src/cmd/services/m3coordinator/ingest/write.go b/src/cmd/services/m3coordinator/ingest/write.go index 924a75814d..cb890d13b2 100644 --- a/src/cmd/services/m3coordinator/ingest/write.go +++ b/src/cmd/services/m3coordinator/ingest/write.go @@ -127,11 +127,46 @@ func (d *downsamplerAndWriter) WriteBatch( ctx context.Context, iter DownsampleAndWriteIter, ) error { + var ( + wg = &sync.WaitGroup{} + multiErr xerrors.MultiError + errLock sync.Mutex + addError = func(err error) { + errLock.Lock() + multiErr = multiErr.Add(err) + errLock.Unlock() + } + ) + + if d.store != nil { + // Write unaggregated. Spin up all the background goroutines that make + // network requests before we do the synchronous work of writing to the + // downsampler. + for iter.Next() { + wg.Add(1) + tags, datapoints, unit := iter.Current() + go func() { + err := d.store.Write(ctx, &storage.WriteQuery{ + Tags: tags, + Datapoints: datapoints, + Unit: unit, + Attributes: storage.Attributes{ + MetricsType: storage.UnaggregatedMetricsType, + }, + }) + if err != nil { + addError(err) + } + wg.Done() + }() + } + } + if d.downsampler != nil { // Write aggregated. 
appender, err := d.downsampler.NewMetricsAppender() if err != nil { - return err + addError(err) } for iter.Next() { @@ -143,59 +178,27 @@ func (d *downsamplerAndWriter) WriteBatch( samplesAppender, err := appender.SamplesAppender() if err != nil { - return err + addError(err) + continue } for _, dp := range datapoints { err := samplesAppender.AppendGaugeSample(dp.Value) if err != nil { - return err + addError(err) + continue } } } appender.Finalize() if err := iter.Error(); err != nil { - return err + addError(err) } - if err := iter.Reset(); err != nil { - return err - } - } - - if d.store != nil { - // Write unaggregated. - var ( - wg = &sync.WaitGroup{} - errLock sync.Mutex - multiErr xerrors.MultiError - ) - for iter.Next() { - wg.Add(1) - tags, datapoints, unit := iter.Current() - go func() { - err := d.store.Write(ctx, &storage.WriteQuery{ - Tags: tags, - Datapoints: datapoints, - Unit: unit, - Attributes: storage.Attributes{ - MetricsType: storage.UnaggregatedMetricsType, - }, - }) - if err != nil { - errLock.Lock() - multiErr = multiErr.Add(err) - errLock.Unlock() - } - wg.Done() - }() - } - - wg.Wait() - return multiErr.LastError() } - return nil + wg.Wait() + return multiErr.LastError() } func (d *downsamplerAndWriter) Storage() storage.Storage { From 9c36aac2b22c89a4ce7b68163930d03ab2dde06b Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Thu, 24 Jan 2019 16:33:29 -0500 Subject: [PATCH 36/70] fix tests and add comment --- src/cmd/services/m3coordinator/ingest/write.go | 9 ++++++++- src/cmd/services/m3coordinator/ingest/write_test.go | 8 ++++++++ 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/src/cmd/services/m3coordinator/ingest/write.go b/src/cmd/services/m3coordinator/ingest/write.go index cb890d13b2..9f911371dc 100644 --- a/src/cmd/services/m3coordinator/ingest/write.go +++ b/src/cmd/services/m3coordinator/ingest/write.go @@ -162,7 +162,14 @@ func (d *downsamplerAndWriter) WriteBatch( } } - if d.downsampler != nil { + // Iter does not need to be synchronized because even though we use it to spawn + // many goroutines above, the iteration is always synchronous. + resetErr := iter.Reset() + if resetErr != nil { + addError(resetErr) + } + + if d.downsampler != nil && resetErr == nil { // Write aggregated. 
appender, err := d.downsampler.NewMetricsAppender() if err != nil { diff --git a/src/cmd/services/m3coordinator/ingest/write_test.go b/src/cmd/services/m3coordinator/ingest/write_test.go index b34f9231b3..00aba1a045 100644 --- a/src/cmd/services/m3coordinator/ingest/write_test.go +++ b/src/cmd/services/m3coordinator/ingest/write_test.go @@ -143,6 +143,8 @@ func (i *testIter) Error() error { func TestDownsampleAndWrite(t *testing.T) { ctrl := gomock.NewController(t) + defer ctrl.Finish() + downAndWrite, downsampler, session := newTestDownsamplerAndWriter(t, ctrl) var ( @@ -176,6 +178,8 @@ func TestDownsampleAndWrite(t *testing.T) { func TestDownsampleAndWriteNoDownsampler(t *testing.T) { ctrl := gomock.NewController(t) + defer ctrl.Finish() + downAndWrite, _, session := newTestDownsamplerAndWriter(t, ctrl) downAndWrite.downsampler = nil @@ -190,6 +194,8 @@ func TestDownsampleAndWriteNoDownsampler(t *testing.T) { func TestDownsampleAndWriteBatch(t *testing.T) { ctrl := gomock.NewController(t) + defer ctrl.Finish() + downAndWrite, downsampler, session := newTestDownsamplerAndWriter(t, ctrl) var ( @@ -234,6 +240,8 @@ func TestDownsampleAndWriteBatch(t *testing.T) { func TestDownsampleAndWriteBatchNoDownsampler(t *testing.T) { ctrl := gomock.NewController(t) + defer ctrl.Finish() + downAndWrite, _, session := newTestDownsamplerAndWriter(t, ctrl) downAndWrite.downsampler = nil From 2e7ec1ca8dcf8c0522f5ad0376c54ab7457e3cd9 Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Thu, 24 Jan 2019 16:34:10 -0500 Subject: [PATCH 37/70] disable test --- Makefile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 294e8aea64..bea68d9818 100644 --- a/Makefile +++ b/Makefile @@ -200,7 +200,8 @@ docker-integration-test: @./scripts/docker-integration-tests/setup.sh @./scripts/docker-integration-tests/simple/test.sh @./scripts/docker-integration-tests/prometheus/test.sh - @./scripts/docker-integration-tests/carbon/test.sh + # TODO(rartoul): Re-enable once the query P.R lands and we can fix this test. + # @./scripts/docker-integration-tests/carbon/test.sh .PHONY: site-build site-build: From 0b83af6a9b613654c82865618efbe134f509ef7e Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Thu, 24 Jan 2019 16:36:45 -0500 Subject: [PATCH 38/70] make carbon enabled by default --- src/cmd/services/m3query/config/config.go | 1 + 1 file changed, 1 insertion(+) diff --git a/src/cmd/services/m3query/config/config.go b/src/cmd/services/m3query/config/config.go index 295b8d482b..45755d4dcd 100644 --- a/src/cmd/services/m3query/config/config.go +++ b/src/cmd/services/m3query/config/config.go @@ -136,6 +136,7 @@ type IngestConfiguration struct { M3Msg m3msg.Configuration `yaml:"m3msg"` } +// TODO(rartoul): Make this enabled by default. // CarbonConfiguration is the configuration for the carbon server. 
type CarbonConfiguration struct { Enabled bool `yaml:"enabled"` From 2a4091205f957c2db22879576a9bdd1d298dbea4 Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Thu, 24 Jan 2019 16:38:47 -0500 Subject: [PATCH 39/70] add TODO --- src/cmd/services/m3coordinator/ingest/carbon/ingest.go | 1 + 1 file changed, 1 insertion(+) diff --git a/src/cmd/services/m3coordinator/ingest/carbon/ingest.go b/src/cmd/services/m3coordinator/ingest/carbon/ingest.go index d96b7ab0e7..98e2c909f1 100644 --- a/src/cmd/services/m3coordinator/ingest/carbon/ingest.go +++ b/src/cmd/services/m3coordinator/ingest/carbon/ingest.go @@ -64,6 +64,7 @@ func NewIngester( downsamplerAndWriter ingest.DownsamplerAndWriter, opts Options, ) (m3xserver.Handler, error) { + // TODO(rartoul): Validate options. return &ingester{ downsamplerAndWriter: downsamplerAndWriter, opts: opts, From 4c3d7ae5f345710f5667df1ad2e58da650d7b3f9 Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Thu, 24 Jan 2019 16:46:23 -0500 Subject: [PATCH 40/70] fix write code --- .../services/m3coordinator/ingest/write.go | 58 +++++++++++-------- 1 file changed, 33 insertions(+), 25 deletions(-) diff --git a/src/cmd/services/m3coordinator/ingest/write.go b/src/cmd/services/m3coordinator/ingest/write.go index 9f911371dc..f4d4aaae08 100644 --- a/src/cmd/services/m3coordinator/ingest/write.go +++ b/src/cmd/services/m3coordinator/ingest/write.go @@ -170,42 +170,50 @@ func (d *downsamplerAndWriter) WriteBatch( } if d.downsampler != nil && resetErr == nil { - // Write aggregated. - appender, err := d.downsampler.NewMetricsAppender() + err := d.writeAggregatedBatch(iter) if err != nil { addError(err) } + } - for iter.Next() { - appender.Reset() - tags, datapoints, _ := iter.Current() - for _, tag := range tags.Tags { - appender.AddTag(tag.Name, tag.Value) - } + wg.Wait() + return multiErr.LastError() +} - samplesAppender, err := appender.SamplesAppender() - if err != nil { - addError(err) - continue - } +func (d *downsamplerAndWriter) writeAggregatedBatch( + iter DownsampleAndWriteIter, +) error { + appender, err := d.downsampler.NewMetricsAppender() + if err != nil { + return err + } - for _, dp := range datapoints { - err := samplesAppender.AppendGaugeSample(dp.Value) - if err != nil { - addError(err) - continue - } - } + for iter.Next() { + appender.Reset() + tags, datapoints, _ := iter.Current() + for _, tag := range tags.Tags { + appender.AddTag(tag.Name, tag.Value) } - appender.Finalize() - if err := iter.Error(); err != nil { - addError(err) + samplesAppender, err := appender.SamplesAppender() + if err != nil { + return err + } + + for _, dp := range datapoints { + err := samplesAppender.AppendGaugeSample(dp.Value) + if err != nil { + return err + } } } + appender.Finalize() - wg.Wait() - return multiErr.LastError() + if err := iter.Error(); err != nil { + return err + } + + return nil } func (d *downsamplerAndWriter) Storage() storage.Storage { From 4f1582e4781bd7632569096b53bf691c25cffe90 Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Thu, 24 Jan 2019 17:01:25 -0500 Subject: [PATCH 41/70] switch from nanos to durations --- scripts/docker-integration-tests/common.sh | 24 +++++++++++----------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/scripts/docker-integration-tests/common.sh b/scripts/docker-integration-tests/common.sh index 93dca8535d..cd56d071f5 100644 --- a/scripts/docker-integration-tests/common.sh +++ b/scripts/docker-integration-tests/common.sh @@ -56,16 +56,16 @@ function wait_for_db_init { "snapshotEnabled": true, 
"repairEnabled": false, "retentionOptions": { - "retentionPeriodNanos": 172800000000000, - "blockSizeNanos": 7200000000000, - "bufferFutureNanos": 600000000000, - "bufferPastNanos": 600000000000, + "retentionPeriodDuration": "48h", + "blockSizeDuration": "2h", + "bufferFutureDuration": "10m", + "bufferPastDuration": "10m", "blockDataExpiry": true, - "blockDataExpiryAfterNotAccessPeriodNanos": 300000000000 + "blockDataExpiryAfterNotAccessPeriodDuration": "5m" }, "indexOptions": { "enabled": true, - "blockSizeNanos": 7200000000000 + "blockSizeDuration": "2h" } } }' @@ -84,16 +84,16 @@ function wait_for_db_init { "snapshotEnabled": true, "repairEnabled": false, "retentionOptions": { - "retentionPeriodNanos": 172800000000000, - "blockSizeNanos": 7200000000000, - "bufferFutureNanos": 600000000000, - "bufferPastNanos": 600000000000, + "retentionPeriodDuration": "48h", + "blockSizeDuration": "2h", + "bufferFutureDuration": "10m", + "bufferPastDuration": "10m", "blockDataExpiry": true, - "blockDataExpiryAfterNotAccessPeriodNanos": 300000000000 + "blockDataExpiryAfterNotAccessPeriodDuration": "5m" }, "indexOptions": { "enabled": true, - "blockSizeNanos": 7200000000000 + "blockSizeDuration": "2h" } } }' From 63b65fcb13ed27f939058efb8b25668e60ff6159 Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Thu, 24 Jan 2019 17:06:28 -0500 Subject: [PATCH 42/70] validate options --- .../m3coordinator/ingest/carbon/ingest.go | 21 ++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/src/cmd/services/m3coordinator/ingest/carbon/ingest.go b/src/cmd/services/m3coordinator/ingest/carbon/ingest.go index 98e2c909f1..5aa2a55953 100644 --- a/src/cmd/services/m3coordinator/ingest/carbon/ingest.go +++ b/src/cmd/services/m3coordinator/ingest/carbon/ingest.go @@ -51,6 +51,8 @@ var ( preFormattedKeyNames = [][]byte{} errCannotGenerateTagsFromEmptyName = errors.New("cannot generate tags from empty name") + errIOptsMustBeSet = errors.New("carbon ingester options: instrument options must be st") + errWorkerPoolMustBeSet = errors.New("carbon ingester options: worker pool must be set") ) // Options configures the ingester. @@ -59,12 +61,29 @@ type Options struct { WorkerPool xsync.PooledWorkerPool } +// Validate validates the options struct. +func (o *Options) Validate() error { + if o.InstrumentOptions == nil { + return errIOptsMustBeSet + } + + if o.WorkerPool == nil { + return errWorkerPoolMustBeSet + } + + return nil +} + // NewIngester returns an ingester for carbon metrics. func NewIngester( downsamplerAndWriter ingest.DownsamplerAndWriter, opts Options, ) (m3xserver.Handler, error) { - // TODO(rartoul): Validate options. 
+ err := opts.Validate() + if err != nil { + return err + } + return &ingester{ downsamplerAndWriter: downsamplerAndWriter, opts: opts, From c2a79a1c6736e306003c54ec3931dc199855ab55 Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Thu, 24 Jan 2019 17:12:06 -0500 Subject: [PATCH 43/70] factor out method for better error handling --- .../m3coordinator/ingest/carbon/ingest.go | 43 +++++++++++-------- 1 file changed, 25 insertions(+), 18 deletions(-) diff --git a/src/cmd/services/m3coordinator/ingest/carbon/ingest.go b/src/cmd/services/m3coordinator/ingest/carbon/ingest.go index 5aa2a55953..39b1ce7d01 100644 --- a/src/cmd/services/m3coordinator/ingest/carbon/ingest.go +++ b/src/cmd/services/m3coordinator/ingest/carbon/ingest.go @@ -27,12 +27,14 @@ import ( "fmt" "net" "sync" + "time" "github.com/m3db/m3/src/cmd/services/m3coordinator/ingest" "github.com/m3db/m3/src/metrics/carbon" "github.com/m3db/m3/src/query/models" "github.com/m3db/m3/src/query/ts" "github.com/m3db/m3x/instrument" + "github.com/m3db/m3x/log" m3xserver "github.com/m3db/m3x/server" xsync "github.com/m3db/m3x/sync" xtime "github.com/m3db/m3x/time" @@ -81,18 +83,20 @@ func NewIngester( ) (m3xserver.Handler, error) { err := opts.Validate() if err != nil { - return err + return nil, err } return &ingester{ downsamplerAndWriter: downsamplerAndWriter, opts: opts, + logger: opts.InstrumentOptions.Logger(), }, nil } type ingester struct { downsamplerAndWriter ingest.DownsamplerAndWriter opts Options + logger log.Logger } // TODO(rartoul): Emit metrics @@ -112,23 +116,7 @@ func (i *ingester) Handle(conn net.Conn) { wg.Add(1) i.opts.WorkerPool.Go(func() { - datapoints := []ts.Datapoint{{Timestamp: timestamp, Value: value}} - // TODO(rartoul): Pool. - tags, err := GenerateTagsFromName(name) - if err != nil { - logger.Errorf("err generating tags from carbon name: %s, err: %s", - string(name), err) - return - } - - // TODO(rartoul): Context with timeout? - err = i.downsamplerAndWriter.Write( - context.Background(), tags, datapoints, xtime.Second) - if err != nil { - logger.Errorf("err writing carbon metric: %s, err: %s", - string(name), err) - } - + i.write(name, timestamp, value) wg.Done() }) } @@ -144,6 +132,25 @@ func (i *ingester) Handle(conn net.Conn) { // Don't close the connection, that is the server's responsibility. } +func (i *ingester) write(name []byte, timestamp time.Time, value float64) { + datapoints := []ts.Datapoint{{Timestamp: timestamp, Value: value}} + // TODO(rartoul): Pool. + tags, err := GenerateTagsFromName(name) + if err != nil { + i.logger.Errorf("err generating tags from carbon name: %s, err: %s", + string(name), err) + return + } + + // TODO(rartoul): Context with timeout? + err = i.downsamplerAndWriter.Write( + context.Background(), tags, datapoints, xtime.Second) + if err != nil { + i.logger.Errorf("err writing carbon metric: %s, err: %s", + string(name), err) + } +} + func (i *ingester) Close() { // We don't maintain any state in-between connections so there is nothing to do here. 
} From ba5d5f21b64b7d45b34cfc2cb37b6f950578857c Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Thu, 24 Jan 2019 17:14:15 -0500 Subject: [PATCH 44/70] add constant --- src/query/server/server.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/query/server/server.go b/src/query/server/server.go index 8d22eb3672..9a7e66b2f5 100644 --- a/src/query/server/server.go +++ b/src/query/server/server.go @@ -74,6 +74,8 @@ var ( Namespace: "default", Retention: 2 * 24 * time.Hour, } + + defaultCarbonIngesterListenAddress = "0.0.0.0:7204" ) type cleanupFn func() error @@ -304,8 +306,7 @@ func Run(runOpts RunOptions) { logger.Fatal("unable to create carbon ingester: %v", zap.Error(err)) } - // TODO: Default constants - listenAddress := fmt.Sprintf("0.0.0.0:7204") + listenAddress := defaultCarbonIngesterListenAddress if cfg.Carbon.ListenAddress != "" { listenAddress = cfg.Carbon.ListenAddress } From 1d80dbd581527c7a1935d949d7c16036d9a6238e Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Thu, 24 Jan 2019 17:16:57 -0500 Subject: [PATCH 45/70] more and better logs --- src/query/server/server.go | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/src/query/server/server.go b/src/query/server/server.go index 9a7e66b2f5..2825531217 100644 --- a/src/query/server/server.go +++ b/src/query/server/server.go @@ -268,7 +268,7 @@ func Run(runOpts RunOptions) { } if cfg.Carbon != nil && cfg.Carbon.Enabled { - logger.Info("starting carbon server") + logger.Info("carbon ingestion enabled") var ( carbonIOpts = instrumentOptions.SetMetricsScope( @@ -291,7 +291,7 @@ func Run(runOpts RunOptions) { } workerPool, err := xsync.NewPooledWorkerPool(carbonWorkerPoolSize, carbonWorkerPoolOpts) if err != nil { - logger.Fatal("unable to create worker pool for carbon ingester: %v", zap.Error(err)) + logger.Fatal("unable to create worker pool for carbon ingester", zap.Error(err)) } workerPool.Init() @@ -303,7 +303,7 @@ func Run(runOpts RunOptions) { WorkerPool: workerPool, }) if err != nil { - logger.Fatal("unable to create carbon ingester: %v", zap.Error(err)) + logger.Fatal("unable to create carbon ingester", zap.Error(err)) } listenAddress := defaultCarbonIngesterListenAddress @@ -312,11 +312,14 @@ func Run(runOpts RunOptions) { } serverOpts := xserver.NewOptions().SetInstrumentOptions(carbonIOpts) server := xserver.NewServer(listenAddress, ingester, serverOpts) + + logger.Info("starting carbon ingestion server", zap.String("listenAddress", listenAddress)) err = server.ListenAndServe() if err != nil { - logger.Fatal("unable to start carbon ingestion server at listen address: %s, err: %v", + logger.Fatal("unable to start carbon ingestion server at listen address", zap.String("listenAddress", listenAddress), zap.Error(err)) } + logger.Info("started carbon ingestion server", zap.String("listenAddress", listenAddress)) } var interruptCh <-chan error = make(chan error) From 6cb50f7ad820dd2dd6d7399b24a2e83001d76cd7 Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Thu, 24 Jan 2019 17:17:30 -0500 Subject: [PATCH 46/70] camelcase yaml keys --- src/cmd/services/m3query/config/config.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/cmd/services/m3query/config/config.go b/src/cmd/services/m3query/config/config.go index 45755d4dcd..40a29ebf4c 100644 --- a/src/cmd/services/m3query/config/config.go +++ b/src/cmd/services/m3query/config/config.go @@ -140,8 +140,8 @@ type IngestConfiguration struct { // CarbonConfiguration is the configuration for the carbon 
server. type CarbonConfiguration struct { Enabled bool `yaml:"enabled"` - MaxConcurrency int `yaml:"max_concurrency"` - ListenAddress string `yaml:"listen_address"` + MaxConcurrency int `yaml:"maxConcurrency"` + ListenAddress string `yaml:"listenAddress"` } // LocalConfiguration is the local embedded configuration if running From 02c48c605fe02c8721d4ba8d3815c3417c99b7b4 Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Thu, 24 Jan 2019 17:18:09 -0500 Subject: [PATCH 47/70] fix error message --- src/query/api/v1/handler/prometheus/remote/write.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/query/api/v1/handler/prometheus/remote/write.go b/src/query/api/v1/handler/prometheus/remote/write.go index 4c177b6e03..55f75927cb 100644 --- a/src/query/api/v1/handler/prometheus/remote/write.go +++ b/src/query/api/v1/handler/prometheus/remote/write.go @@ -50,7 +50,7 @@ const ( ) var ( - errNoStorageOrDownsampler = errors.New("no storage or downsampler set, requires at least one or both") + errNoDownsamplerAndWriter = errors.New("no ingest.DownsamplerAndWriter was set") ) // PromWriteHandler represents a handler for prometheus write endpoint. From e11d56ad8cff0ad2dc071dce670270bcbb5a9203 Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Thu, 24 Jan 2019 17:18:29 -0500 Subject: [PATCH 48/70] fix nit --- src/cmd/services/m3coordinator/ingest/write.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cmd/services/m3coordinator/ingest/write.go b/src/cmd/services/m3coordinator/ingest/write.go index f4d4aaae08..9bdc169e3e 100644 --- a/src/cmd/services/m3coordinator/ingest/write.go +++ b/src/cmd/services/m3coordinator/ingest/write.go @@ -128,7 +128,7 @@ func (d *downsamplerAndWriter) WriteBatch( iter DownsampleAndWriteIter, ) error { var ( - wg = &sync.WaitGroup{} + wg = sync.WaitGroup{} multiErr xerrors.MultiError errLock sync.Mutex addError = func(err error) { From 9b055d88540679a7ddaa44965e61bbbe8d7716e2 Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Thu, 24 Jan 2019 17:19:10 -0500 Subject: [PATCH 49/70] add const --- src/query/server/server.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/query/server/server.go b/src/query/server/server.go index 2825531217..75cb13d96a 100644 --- a/src/query/server/server.go +++ b/src/query/server/server.go @@ -75,7 +75,8 @@ var ( Retention: 2 * 24 * time.Hour, } - defaultCarbonIngesterListenAddress = "0.0.0.0:7204" + defaultCarbonIngesterListenAddress = "0.0.0.0:7204" + defaultCarbonIngesterWorkerPoolSize = 1024 ) type cleanupFn func() error @@ -286,8 +287,7 @@ func Run(runOpts RunOptions) { carbonWorkerPoolOpts = xsync.NewPooledWorkerPoolOptions(). SetGrowOnDemand(true). SetKillWorkerProbability(0.001) - // TODO: Constants? 
- carbonWorkerPoolSize = 1024 + carbonWorkerPoolSize = defaultCarbonIngesterWorkerPoolSize } workerPool, err := xsync.NewPooledWorkerPool(carbonWorkerPoolSize, carbonWorkerPoolOpts) if err != nil { From 9520f621f6da2690ced9e804f9becb0cb641a2c1 Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Thu, 24 Jan 2019 17:20:07 -0500 Subject: [PATCH 50/70] add newline --- scripts/docker-integration-tests/carbon/m3coordinator.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/docker-integration-tests/carbon/m3coordinator.yml b/scripts/docker-integration-tests/carbon/m3coordinator.yml index b015079adb..81e804d528 100644 --- a/scripts/docker-integration-tests/carbon/m3coordinator.yml +++ b/scripts/docker-integration-tests/carbon/m3coordinator.yml @@ -51,4 +51,4 @@ clusters: backgroundHealthCheckFailThrottleFactor: 0.5 carbon: - enabled: true \ No newline at end of file + enabled: true From 2ff51c8e1e0c40f06eed277f13a11c563670c346 Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Thu, 24 Jan 2019 17:20:43 -0500 Subject: [PATCH 51/70] remove unused file --- scripts/docker-integration-tests/carbon/python.py | 4 ---- 1 file changed, 4 deletions(-) delete mode 100644 scripts/docker-integration-tests/carbon/python.py diff --git a/scripts/docker-integration-tests/carbon/python.py b/scripts/docker-integration-tests/carbon/python.py deleted file mode 100644 index 3376001c57..0000000000 --- a/scripts/docker-integration-tests/carbon/python.py +++ /dev/null @@ -1,4 +0,0 @@ -import pipes -rawstring = r's=$(date +%s); curl -sSfg "http://localhost:7201/api/v1/query_range?start=$(( s - 300))&end=$s&step=10&query={__graphite0__='foo',__graphite1__='bar',__graphite2__='baz'}" | jq ".data.result[0].values[][1]==\"1\"" | grep -m 1 true' - -print pipes.quote(rawstring) \ No newline at end of file From 767861aa869ed97065d041a373e16230e3901d7e Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Thu, 24 Jan 2019 18:02:59 -0500 Subject: [PATCH 52/70] generate some bad lines in the ingestion carbon test --- .../ingest/carbon/ingest_test.go | 26 +++++++++++++------ src/metrics/carbon/parser.go | 2 +- 2 files changed, 19 insertions(+), 9 deletions(-) diff --git a/src/cmd/services/m3coordinator/ingest/carbon/ingest_test.go b/src/cmd/services/m3coordinator/ingest/carbon/ingest_test.go index fc2ed386b6..cd9376083d 100644 --- a/src/cmd/services/m3coordinator/ingest/carbon/ingest_test.go +++ b/src/cmd/services/m3coordinator/ingest/carbon/ingest_test.go @@ -187,6 +187,7 @@ type testMetric struct { tags models.Tags timestamp int value float64 + isValid bool } func assertTestMetricsAreEqual(t *testing.T, a, b []testMetric) { @@ -204,8 +205,24 @@ func assertTestMetricsAreEqual(t *testing.T, a, b []testMetric) { } func init() { + var err error + testOptions.WorkerPool, err = xsync.NewPooledWorkerPool(16, xsync.NewPooledWorkerPoolOptions()) + if err != nil { + panic(err) + } + testOptions.WorkerPool.Init() + for i := 0; i < numLinesInTestPacket; i++ { - metric := []byte(fmt.Sprintf("test_metric_%d", i)) + var metric []byte + + if i%10 == 0 { + // Make 1 in 10 lines invalid to test the error paths. + line := []byte(fmt.Sprintf("garbage line %d\n", i)) + testPacket = append(testPacket, line...) + continue + } + + metric = []byte(fmt.Sprintf("test_metric_%d", i)) tags, err := GenerateTagsFromName(metric) if err != nil { @@ -221,11 +238,4 @@ func init() { line := []byte(fmt.Sprintf("%s %d %d\n", string(metric), i, i)) testPacket = append(testPacket, line...) 
} - - var err error - testOptions.WorkerPool, err = xsync.NewPooledWorkerPool(16, xsync.NewPooledWorkerPoolOptions()) - if err != nil { - panic(err) - } - testOptions.WorkerPool.Init() } diff --git a/src/metrics/carbon/parser.go b/src/metrics/carbon/parser.go index 06558a14d1..764f3587a2 100644 --- a/src/metrics/carbon/parser.go +++ b/src/metrics/carbon/parser.go @@ -238,7 +238,7 @@ type Scanner struct { func NewScanner(r io.Reader, iOpts instrument.Options) *Scanner { s := bufio.NewScanner(r) s.Split(bufio.ScanLines) - return &Scanner{scanner: s} + return &Scanner{scanner: s, iOpts: iOpts} } // Scan scans for the next carbon metric. Malformed metrics are skipped but counted. From dcf2151c232b2718bbaebbf461da281c0441835b Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Thu, 24 Jan 2019 18:03:05 -0500 Subject: [PATCH 53/70] fix compilation issue --- src/query/api/v1/handler/prometheus/remote/write.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/query/api/v1/handler/prometheus/remote/write.go b/src/query/api/v1/handler/prometheus/remote/write.go index 55f75927cb..1c89d7ec1c 100644 --- a/src/query/api/v1/handler/prometheus/remote/write.go +++ b/src/query/api/v1/handler/prometheus/remote/write.go @@ -67,7 +67,7 @@ func NewPromWriteHandler( scope tally.Scope, ) (http.Handler, error) { if downsamplerAndWriter == nil { - return nil, errNoStorageOrDownsampler + return nil, errNoDownsamplerAndWriter } return &PromWriteHandler{ From cd32d8661ef4bcd09e85bf0873ee44868d95b8a2 Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Thu, 24 Jan 2019 18:08:41 -0500 Subject: [PATCH 54/70] add more tests --- .../ingest/carbon/ingest_test.go | 26 ++++++++++++++----- 1 file changed, 19 insertions(+), 7 deletions(-) diff --git a/src/cmd/services/m3coordinator/ingest/carbon/ingest_test.go b/src/cmd/services/m3coordinator/ingest/carbon/ingest_test.go index cd9376083d..ee73adefde 100644 --- a/src/cmd/services/m3coordinator/ingest/carbon/ingest_test.go +++ b/src/cmd/services/m3coordinator/ingest/carbon/ingest_test.go @@ -23,6 +23,7 @@ package ingestcarbon import ( "bytes" "context" + "errors" "fmt" "io" "net" @@ -65,8 +66,10 @@ func TestIngesterHandleConn(t *testing.T) { mockDownsamplerAndWriter := ingest.NewMockDownsamplerAndWriter(ctrl) var ( - foundLock = sync.Mutex{} - found = []testMetric{} + lock = sync.Mutex{} + + found = []testMetric{} + idx = 0 ) mockDownsamplerAndWriter.EXPECT(). Write(gomock.Any(), gomock.Any(), gomock.Any(), xtime.Second).DoAndReturn(func( @@ -74,12 +77,21 @@ func TestIngesterHandleConn(t *testing.T) { tags models.Tags, dp ts.Datapoints, unit xtime.Unit, - ) { - foundLock.Lock() + ) interface{} { + lock.Lock() found = append(found, testMetric{ tags: tags, timestamp: int(dp[0].Timestamp.Unix()), value: dp[0].Value}) - foundLock.Unlock() - }).Return(nil).AnyTimes() + + // Make 1 in 10 writes fail to test those paths. + returnErr := idx%10 == 0 + idx++ + lock.Unlock() + + if returnErr { + return errors.New("some_error") + } + return nil + }).AnyTimes() byteConn := &byteConn{b: bytes.NewBuffer(testPacket)} ingester, err := NewIngester(mockDownsamplerAndWriter, testOptions) @@ -217,7 +229,7 @@ func init() { if i%10 == 0 { // Make 1 in 10 lines invalid to test the error paths. - line := []byte(fmt.Sprintf("garbage line %d\n", i)) + line := []byte(fmt.Sprintf("garbage line %d \n", i)) testPacket = append(testPacket, line...) 
continue } From b9b2013ef366e7d6e8dce83f275ce0324a323309 Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Thu, 24 Jan 2019 18:16:21 -0500 Subject: [PATCH 55/70] improve test to try more error paths --- .../m3coordinator/ingest/carbon/ingest_test.go | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/src/cmd/services/m3coordinator/ingest/carbon/ingest_test.go b/src/cmd/services/m3coordinator/ingest/carbon/ingest_test.go index ee73adefde..05da380737 100644 --- a/src/cmd/services/m3coordinator/ingest/carbon/ingest_test.go +++ b/src/cmd/services/m3coordinator/ingest/carbon/ingest_test.go @@ -26,6 +26,7 @@ import ( "errors" "fmt" "io" + "math/rand" "net" "sort" "sync" @@ -229,12 +230,20 @@ func init() { if i%10 == 0 { // Make 1 in 10 lines invalid to test the error paths. - line := []byte(fmt.Sprintf("garbage line %d \n", i)) - testPacket = append(testPacket, line...) - continue + if rand.Intn(2) == 0 { + // Invalid line altogether. + line := []byte(fmt.Sprintf("garbage line %d \n", i)) + testPacket = append(testPacket, line...) + continue + } else { + // Valid line, but invalid name (too many separators). + line := []byte(fmt.Sprintf("test..metric..%d %d %d\n", i, i, i)) + testPacket = append(testPacket, line...) + continue + } } - metric = []byte(fmt.Sprintf("test_metric_%d", i)) + metric = []byte(fmt.Sprintf("test.metric.%d", i)) tags, err := GenerateTagsFromName(metric) if err != nil { From 7e113608e6064714229c996767331f8947b37ee4 Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Thu, 24 Jan 2019 18:26:35 -0500 Subject: [PATCH 56/70] add timeouts for writes --- .../m3coordinator/ingest/carbon/ingest.go | 18 +++++++++++++++--- src/cmd/services/m3query/config/config.go | 7 ++++--- src/query/server/server.go | 1 + 3 files changed, 20 insertions(+), 6 deletions(-) diff --git a/src/cmd/services/m3coordinator/ingest/carbon/ingest.go b/src/cmd/services/m3coordinator/ingest/carbon/ingest.go index 39b1ce7d01..032f293ccd 100644 --- a/src/cmd/services/m3coordinator/ingest/carbon/ingest.go +++ b/src/cmd/services/m3coordinator/ingest/carbon/ingest.go @@ -61,6 +61,7 @@ var ( type Options struct { InstrumentOptions instrument.Options WorkerPool xsync.PooledWorkerPool + Timeout time.Duration } // Validate validates the options struct. @@ -142,13 +143,24 @@ func (i *ingester) write(name []byte, timestamp time.Time, value float64) { return } - // TODO(rartoul): Context with timeout? - err = i.downsamplerAndWriter.Write( - context.Background(), tags, datapoints, xtime.Second) + var ( + ctx context.Context + cleanup func() + ) + context.Background() + if i.opts.Timeout > 0 { + ctx, cleanup = context.WithTimeout(ctx, i.opts.Timeout) + } + + err = i.downsamplerAndWriter.Write(ctx, tags, datapoints, xtime.Second) if err != nil { i.logger.Errorf("err writing carbon metric: %s, err: %s", string(name), err) } + + if cleanup != nil { + cleanup() + } } func (i *ingester) Close() { diff --git a/src/cmd/services/m3query/config/config.go b/src/cmd/services/m3query/config/config.go index 40a29ebf4c..9eebf497c4 100644 --- a/src/cmd/services/m3query/config/config.go +++ b/src/cmd/services/m3query/config/config.go @@ -139,9 +139,10 @@ type IngestConfiguration struct { // TODO(rartoul): Make this enabled by default. // CarbonConfiguration is the configuration for the carbon server. 
type CarbonConfiguration struct { - Enabled bool `yaml:"enabled"` - MaxConcurrency int `yaml:"maxConcurrency"` - ListenAddress string `yaml:"listenAddress"` + Enabled bool `yaml:"enabled"` + MaxConcurrency int `yaml:"maxConcurrency"` + ListenAddress string `yaml:"listenAddress"` + Timeout time.Duration `yaml:"timeout"` } // LocalConfiguration is the local embedded configuration if running diff --git a/src/query/server/server.go b/src/query/server/server.go index 75cb13d96a..075d3dd74b 100644 --- a/src/query/server/server.go +++ b/src/query/server/server.go @@ -301,6 +301,7 @@ func Run(runOpts RunOptions) { ingester, err := ingestcarbon.NewIngester(carbonIngestDownsamplerAndWriter, ingestcarbon.Options{ InstrumentOptions: carbonIOpts, WorkerPool: workerPool, + Timeout: cfg.Carbon.Timeout, }) if err != nil { logger.Fatal("unable to create carbon ingester", zap.Error(err)) From 2d08dcbd59135a1790b657506b8ae14f15b15296 Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Thu, 24 Jan 2019 18:35:55 -0500 Subject: [PATCH 57/70] add metrics --- .../m3coordinator/ingest/carbon/ingest.go | 42 ++++++++++++------- 1 file changed, 27 insertions(+), 15 deletions(-) diff --git a/src/cmd/services/m3coordinator/ingest/carbon/ingest.go b/src/cmd/services/m3coordinator/ingest/carbon/ingest.go index 032f293ccd..1b1c1b3584 100644 --- a/src/cmd/services/m3coordinator/ingest/carbon/ingest.go +++ b/src/cmd/services/m3coordinator/ingest/carbon/ingest.go @@ -91,6 +91,8 @@ func NewIngester( downsamplerAndWriter: downsamplerAndWriter, opts: opts, logger: opts.InstrumentOptions.Logger(), + metrics: newCarbonIngesterMetrics( + opts.InstrumentOptions.MetricsScope()), }, nil } @@ -98,6 +100,7 @@ type ingester struct { downsamplerAndWriter ingest.DownsamplerAndWriter opts Options logger log.Logger + metrics carbonIngesterMetrics } // TODO(rartoul): Emit metrics @@ -111,15 +114,21 @@ func (i *ingester) Handle(conn net.Conn) { logger.Debug("handling new carbon ingestion connection") for s.Scan() { name, timestamp, value := s.Metric() - // Copy name since scanner bytes are recycled. // TODO(rartoul): Pool. + // Copy name since scanner bytes are recycled. name = append([]byte(nil), name...) wg.Add(1) i.opts.WorkerPool.Go(func() { - i.write(name, timestamp, value) + ok := i.write(name, timestamp, value) + if ok { + i.metrics.success.Inc(1) + } wg.Done() }) + + i.metrics.malformed.Inc(s.MalformedCount) + s.MalformedCount = 0 } if err := s.Err(); err != nil { @@ -133,21 +142,21 @@ func (i *ingester) Handle(conn net.Conn) { // Don't close the connection, that is the server's responsibility. } -func (i *ingester) write(name []byte, timestamp time.Time, value float64) { +func (i *ingester) write(name []byte, timestamp time.Time, value float64) bool { datapoints := []ts.Datapoint{{Timestamp: timestamp, Value: value}} // TODO(rartoul): Pool. 
tags, err := GenerateTagsFromName(name) if err != nil { i.logger.Errorf("err generating tags from carbon name: %s, err: %s", string(name), err) - return + i.metrics.malformed.Inc(1) + return false } var ( - ctx context.Context + ctx = context.Background() cleanup func() ) - context.Background() if i.opts.Timeout > 0 { ctx, cleanup = context.WithTimeout(ctx, i.opts.Timeout) } @@ -156,29 +165,32 @@ func (i *ingester) write(name []byte, timestamp time.Time, value float64) { if err != nil { i.logger.Errorf("err writing carbon metric: %s, err: %s", string(name), err) + i.metrics.err.Inc(1) + return false } if cleanup != nil { cleanup() } + return true } func (i *ingester) Close() { // We don't maintain any state in-between connections so there is nothing to do here. } -func newCarbonIngesterMetrics(m tally.Scope) carbonHandlerMetrics { - writesScope := m.SubScope("writes") - return carbonHandlerMetrics{ - unresolvedIDs: writesScope.Counter("ids-policy-unresolved"), - malformedCounter: writesScope.Counter("malformed"), +func newCarbonIngesterMetrics(m tally.Scope) carbonIngesterMetrics { + return carbonIngesterMetrics{ + success: m.Counter("success"), + err: m.Counter("error"), + malformed: m.Counter("malformed"), } } -type carbonHandlerMetrics struct { - unresolvedIDs tally.Counter - malformedCounter tally.Counter - readTimeLatency tally.Timer +type carbonIngesterMetrics struct { + success tally.Counter + err tally.Counter + malformed tally.Counter } // GenerateTagsFromName accepts a carbon metric name and blows it up into a list of From c83d2c3a88841b93f33898c393715b89f2901421 Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Thu, 24 Jan 2019 18:36:21 -0500 Subject: [PATCH 58/70] add metrics --- src/cmd/services/m3coordinator/ingest/carbon/ingest.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cmd/services/m3coordinator/ingest/carbon/ingest.go b/src/cmd/services/m3coordinator/ingest/carbon/ingest.go index 1b1c1b3584..feb792f241 100644 --- a/src/cmd/services/m3coordinator/ingest/carbon/ingest.go +++ b/src/cmd/services/m3coordinator/ingest/carbon/ingest.go @@ -127,7 +127,7 @@ func (i *ingester) Handle(conn net.Conn) { wg.Done() }) - i.metrics.malformed.Inc(s.MalformedCount) + i.metrics.malformed.Inc(int64(s.MalformedCount)) s.MalformedCount = 0 } From bb1e055f5ff61b58ade592548dc2a9577b2c8245 Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Thu, 24 Jan 2019 18:36:42 -0500 Subject: [PATCH 59/70] delete TODO --- src/cmd/services/m3coordinator/ingest/carbon/ingest.go | 1 - 1 file changed, 1 deletion(-) diff --git a/src/cmd/services/m3coordinator/ingest/carbon/ingest.go b/src/cmd/services/m3coordinator/ingest/carbon/ingest.go index feb792f241..bcd145416f 100644 --- a/src/cmd/services/m3coordinator/ingest/carbon/ingest.go +++ b/src/cmd/services/m3coordinator/ingest/carbon/ingest.go @@ -103,7 +103,6 @@ type ingester struct { metrics carbonIngesterMetrics } -// TODO(rartoul): Emit metrics func (i *ingester) Handle(conn net.Conn) { var ( wg = sync.WaitGroup{} From 67de87b18605c3222b398c3eb9eabd0a51516313 Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Thu, 24 Jan 2019 20:13:40 -0500 Subject: [PATCH 60/70] fix lint issues --- .../ingest/carbon/ingest_test.go | 3 +- src/cmd/services/m3query/config/config.go | 65 ++++++++++++++++--- src/metrics/carbon/parser_test.go | 1 + src/query/server/server.go | 4 +- 4 files changed, 59 insertions(+), 14 deletions(-) diff --git a/src/cmd/services/m3coordinator/ingest/carbon/ingest_test.go 
b/src/cmd/services/m3coordinator/ingest/carbon/ingest_test.go index 05da380737..4d36a617c4 100644 --- a/src/cmd/services/m3coordinator/ingest/carbon/ingest_test.go +++ b/src/cmd/services/m3coordinator/ingest/carbon/ingest_test.go @@ -33,8 +33,6 @@ import ( "testing" "time" - "github.com/golang/mock/gomock" - "github.com/m3db/m3/src/cmd/services/m3coordinator/ingest" "github.com/m3db/m3/src/query/models" "github.com/m3db/m3/src/query/ts" @@ -42,6 +40,7 @@ import ( xsync "github.com/m3db/m3x/sync" xtime "github.com/m3db/m3x/time" + "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" ) diff --git a/src/cmd/services/m3query/config/config.go b/src/cmd/services/m3query/config/config.go index 9eebf497c4..6c0c21bec8 100644 --- a/src/cmd/services/m3query/config/config.go +++ b/src/cmd/services/m3query/config/config.go @@ -42,13 +42,20 @@ const ( GRPCStorageType BackendStorageType = "grpc" // M3DBStorageType is for m3db backend. M3DBStorageType BackendStorageType = "m3db" + + defaultCarbonIngesterListenAddress = "0.0.0.0:7204" ) -// defaultLimitsConfiguration is applied if `limits` isn't specified. -var defaultLimitsConfiguration = &LimitsConfiguration{ - // this is sufficient for 1 day span / 1s step, or 60 days with a 1m step. - MaxComputedDatapoints: 86400, -} +var ( + defaultCarbonIngesterTimeout = 5 * time.Second + defaultCarbonIngesterEnabled = true + + // defaultLimitsConfiguration is applied if `limits` isn't specified. + defaultLimitsConfiguration = &LimitsConfiguration{ + // this is sufficient for 1 day span / 1s step, or 60 days with a 1m step. + MaxComputedDatapoints: 86400, + } +) // Configuration is the configuration for the query service. type Configuration struct { @@ -101,6 +108,40 @@ type Configuration struct { Limits LimitsConfiguration `yaml:"limits"` } +// CarbonConfiguration returns the carbon configuration. +func (c *Configuration) CarbonConfiguration() *CarbonConfiguration { + if c.Carbon == nil { + // Nothing was specified in the configuration, use defaults. + return &CarbonConfiguration{ + Ingestion: &CarbonIngestionConfiguration{ + Enabled: &defaultCarbonIngesterEnabled, + ListenAddress: defaultCarbonIngesterListenAddress, + Timeout: &defaultCarbonIngesterTimeout, + }, + } + } + + if c.Carbon.Ingestion.Enabled == nil { + // If they have set any carbon configuration and haven't explicitly marked + // the ingestion as disabled, then assume they want it enabled. + c.Carbon.Ingestion.Enabled = &defaultCarbonIngesterEnabled + } + + if c.Carbon.Ingestion.ListenAddress == "" { + // If they have set any carbon configuration and haven't explicitly set + // the listen address, then assume they want to use the default one. + c.Carbon.Ingestion.ListenAddress = defaultCarbonIngesterListenAddress + } + + if c.Carbon.Ingestion.Timeout == nil { + // If they have set any carbon configuration and haven't explicitly set + // a timeout, then assume they want the default value. + c.Carbon.Ingestion.Timeout = &defaultCarbonIngesterTimeout + } + + return c.Carbon +} + // Filter is a query filter type. type Filter string @@ -136,13 +177,17 @@ type IngestConfiguration struct { M3Msg m3msg.Configuration `yaml:"m3msg"` } -// TODO(rartoul): Make this enabled by default. // CarbonConfiguration is the configuration for the carbon server. 
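The pointer-typed fields and package-level defaults above let the configuration distinguish an explicit "enabled: false" from a field that was simply omitted. A standalone sketch of that pattern is below; gopkg.in/yaml.v2 is used purely for illustration, and the inline defaults mirror defaultCarbonIngesterEnabled, defaultCarbonIngesterTimeout, and defaultCarbonIngesterListenAddress.

```go
package main

import (
	"fmt"
	"log"
	"time"

	yaml "gopkg.in/yaml.v2"
)

// Pointer fields distinguish "omitted" from an explicit false or zero value,
// so defaults only apply when the operator said nothing.
type carbonIngestion struct {
	Enabled       *bool          `yaml:"enabled"`
	ListenAddress string         `yaml:"listenAddress"`
	Timeout       *time.Duration `yaml:"timeout"`
}

func main() {
	var cfg carbonIngestion
	if err := yaml.Unmarshal([]byte("enabled: false\n"), &cfg); err != nil {
		log.Fatal(err)
	}

	enabled := true // defaultCarbonIngesterEnabled
	if cfg.Enabled != nil {
		enabled = *cfg.Enabled
	}
	timeout := 5 * time.Second // defaultCarbonIngesterTimeout
	if cfg.Timeout != nil {
		timeout = *cfg.Timeout
	}
	addr := "0.0.0.0:7204" // defaultCarbonIngesterListenAddress
	if cfg.ListenAddress != "" {
		addr = cfg.ListenAddress
	}

	fmt.Println(enabled, timeout, addr) // false 5s 0.0.0.0:7204
}
```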
type CarbonConfiguration struct { - Enabled bool `yaml:"enabled"` - MaxConcurrency int `yaml:"maxConcurrency"` - ListenAddress string `yaml:"listenAddress"` - Timeout time.Duration `yaml:"timeout"` + Ingestion *CarbonIngestionConfiguration +} + +// CarbonIngestionConfiguration is the configuration struct for carbon ingestion. +type CarbonIngestionConfiguration struct { + Enabled *bool `yaml:"enabled"` + MaxConcurrency int `yaml:"maxConcurrency"` + ListenAddress string `yaml:"listenAddress"` + Timeout *time.Duration `yaml:"timeout"` } // LocalConfiguration is the local embedded configuration if running diff --git a/src/metrics/carbon/parser_test.go b/src/metrics/carbon/parser_test.go index 193cb2c43b..d609ca7959 100644 --- a/src/metrics/carbon/parser_test.go +++ b/src/metrics/carbon/parser_test.go @@ -29,6 +29,7 @@ import ( "time" "github.com/m3db/m3x/instrument" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/src/query/server/server.go b/src/query/server/server.go index 075d3dd74b..f78547a28d 100644 --- a/src/query/server/server.go +++ b/src/query/server/server.go @@ -75,7 +75,6 @@ var ( Retention: 2 * 24 * time.Hour, } - defaultCarbonIngesterListenAddress = "0.0.0.0:7204" defaultCarbonIngesterWorkerPoolSize = 1024 ) @@ -268,7 +267,8 @@ func Run(runOpts RunOptions) { logger.Info("no m3msg server configured") } - if cfg.Carbon != nil && cfg.Carbon.Enabled { + carbonConfig := cfg.CarbonConfiguration().Ingestion + if *carbonConfig.Enabled { logger.Info("carbon ingestion enabled") var ( From 7d4504de3654b6cd543e2f8f5f81c15e676510c7 Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Thu, 24 Jan 2019 20:34:01 -0500 Subject: [PATCH 61/70] fix server.go --- src/query/server/server.go | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/src/query/server/server.go b/src/query/server/server.go index f78547a28d..c2aa5bcf52 100644 --- a/src/query/server/server.go +++ b/src/query/server/server.go @@ -277,12 +277,12 @@ func Run(runOpts RunOptions) { carbonWorkerPoolOpts xsync.PooledWorkerPoolOptions carbonWorkerPoolSize int ) - if cfg.Carbon.MaxConcurrency > 0 { + if carbonConfig.MaxConcurrency > 0 { // Use a bounded worker pool if they requested a specific maximum concurrency. carbonWorkerPoolOpts = xsync.NewPooledWorkerPoolOptions(). SetGrowOnDemand(false). SetInstrumentOptions(carbonIOpts) - carbonWorkerPoolSize = cfg.Carbon.MaxConcurrency + carbonWorkerPoolSize = carbonConfig.MaxConcurrency } else { carbonWorkerPoolOpts = xsync.NewPooledWorkerPoolOptions(). SetGrowOnDemand(true). 
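Setting maxConcurrency > 0 selects a fixed-size worker pool, while the default path grows on demand. The patch relies on m3x/sync's PooledWorkerPool for this; the stdlib-only sketch below just makes the bounded behaviour concrete (the boundedPool type is illustrative and not the m3x API).

```go
package main

import (
	"fmt"
	"sync"
)

// boundedPool caps in-flight work at maxConcurrency, which is roughly what the
// ingester asks of its worker pool when maxConcurrency > 0; the grow-on-demand
// default instead spawns extra goroutines under load.
type boundedPool struct {
	sem chan struct{}
	wg  sync.WaitGroup
}

func newBoundedPool(maxConcurrency int) *boundedPool {
	return &boundedPool{sem: make(chan struct{}, maxConcurrency)}
}

// Go blocks once maxConcurrency tasks are already in flight, applying
// backpressure to the connection handler that submits writes.
func (p *boundedPool) Go(fn func()) {
	p.sem <- struct{}{}
	p.wg.Add(1)
	go func() {
		defer func() { <-p.sem; p.wg.Done() }()
		fn()
	}()
}

func (p *boundedPool) Wait() { p.wg.Wait() }

func main() {
	pool := newBoundedPool(4)
	for i := 0; i < 16; i++ {
		i := i
		pool.Go(func() { fmt.Println("write", i) })
	}
	pool.Wait()
}
```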
@@ -301,18 +301,14 @@ func Run(runOpts RunOptions) { ingester, err := ingestcarbon.NewIngester(carbonIngestDownsamplerAndWriter, ingestcarbon.Options{ InstrumentOptions: carbonIOpts, WorkerPool: workerPool, - Timeout: cfg.Carbon.Timeout, + Timeout: *carbonConfig.Timeout, }) if err != nil { logger.Fatal("unable to create carbon ingester", zap.Error(err)) } - listenAddress := defaultCarbonIngesterListenAddress - if cfg.Carbon.ListenAddress != "" { - listenAddress = cfg.Carbon.ListenAddress - } serverOpts := xserver.NewOptions().SetInstrumentOptions(carbonIOpts) - server := xserver.NewServer(listenAddress, ingester, serverOpts) + server := xserver.NewServer(carbonConfig.ListenAddress, ingester, serverOpts) logger.Info("starting carbon ingestion server", zap.String("listenAddress", listenAddress)) err = server.ListenAndServe() From 0fdac03402de4b1c1a5e8e1a00880e69eeb1d3a4 Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Thu, 24 Jan 2019 20:35:35 -0500 Subject: [PATCH 62/70] fix server.go --- src/query/server/server.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/query/server/server.go b/src/query/server/server.go index c2aa5bcf52..3e46c45b77 100644 --- a/src/query/server/server.go +++ b/src/query/server/server.go @@ -314,7 +314,7 @@ func Run(runOpts RunOptions) { err = server.ListenAndServe() if err != nil { logger.Fatal("unable to start carbon ingestion server at listen address", - zap.String("listenAddress", listenAddress), zap.Error(err)) + zap.String("listenAddress", carbonConfig.ListenAddress), zap.Error(err)) } logger.Info("started carbon ingestion server", zap.String("listenAddress", listenAddress)) } From 94579aab8639b36d0ebc2563146622b3f9002d34 Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Thu, 24 Jan 2019 22:16:42 -0500 Subject: [PATCH 63/70] fix broken test --- src/query/server/server.go | 8 ++++---- src/query/server/server_test.go | 4 ++++ 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/src/query/server/server.go b/src/query/server/server.go index 3e46c45b77..e069e5dcfb 100644 --- a/src/query/server/server.go +++ b/src/query/server/server.go @@ -308,15 +308,15 @@ func Run(runOpts RunOptions) { } serverOpts := xserver.NewOptions().SetInstrumentOptions(carbonIOpts) - server := xserver.NewServer(carbonConfig.ListenAddress, ingester, serverOpts) + carbonServer := xserver.NewServer(carbonConfig.ListenAddress, ingester, serverOpts) - logger.Info("starting carbon ingestion server", zap.String("listenAddress", listenAddress)) - err = server.ListenAndServe() + logger.Info("starting carbon ingestion server", zap.String("listenAddress", carbonConfig.ListenAddress)) + err = carbonServer.ListenAndServe() if err != nil { logger.Fatal("unable to start carbon ingestion server at listen address", zap.String("listenAddress", carbonConfig.ListenAddress), zap.Error(err)) } - logger.Info("started carbon ingestion server", zap.String("listenAddress", listenAddress)) + logger.Info("started carbon ingestion server", zap.String("listenAddress", carbonConfig.ListenAddress)) } var interruptCh <-chan error = make(chan error) diff --git a/src/query/server/server_test.go b/src/query/server/server_test.go index af0434ea97..02af237980 100644 --- a/src/query/server/server_test.go +++ b/src/query/server/server_test.go @@ -234,6 +234,10 @@ listenAddress: type: "config" value: "127.0.0.1:17201" +carbon: + ingestion: + listenAddress: "127.0.0.1:17204" + metrics: scope: prefix: "coordinator" From e35158a56dce3d32c5e68a0301bfce94c0b1e0b6 Mon Sep 17 00:00:00 2001 From: 
Richard Artoul Date: Thu, 24 Jan 2019 22:20:18 -0500 Subject: [PATCH 64/70] fix cleanup code --- src/cmd/services/m3coordinator/ingest/carbon/ingest.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/cmd/services/m3coordinator/ingest/carbon/ingest.go b/src/cmd/services/m3coordinator/ingest/carbon/ingest.go index bcd145416f..6db162ccbb 100644 --- a/src/cmd/services/m3coordinator/ingest/carbon/ingest.go +++ b/src/cmd/services/m3coordinator/ingest/carbon/ingest.go @@ -161,6 +161,10 @@ func (i *ingester) write(name []byte, timestamp time.Time, value float64) bool { } err = i.downsamplerAndWriter.Write(ctx, tags, datapoints, xtime.Second) + if cleanup != nil { + cleanup() + } + if err != nil { i.logger.Errorf("err writing carbon metric: %s, err: %s", string(name), err) @@ -168,9 +172,6 @@ func (i *ingester) write(name []byte, timestamp time.Time, value float64) bool { return false } - if cleanup != nil { - cleanup() - } return true } From 48074430970e6582a6b94ebf84257da8e796247d Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Thu, 24 Jan 2019 22:30:50 -0500 Subject: [PATCH 65/70] address config feedback --- src/cmd/services/m3query/config/config.go | 68 +++++++++++------------ src/query/server/server.go | 23 ++++---- 2 files changed, 45 insertions(+), 46 deletions(-) diff --git a/src/cmd/services/m3query/config/config.go b/src/cmd/services/m3query/config/config.go index 6c0c21bec8..747e3e5637 100644 --- a/src/cmd/services/m3query/config/config.go +++ b/src/cmd/services/m3query/config/config.go @@ -102,46 +102,12 @@ type Configuration struct { Ingest *IngestConfiguration `yaml:"ingest"` // Carbon is the carbon configuration. - Carbon *CarbonConfiguration `yaml:"carbon"` + Carbon CarbonConfiguration `yaml:"carbon"` // Limits specifies limits on per-query resource usage. Limits LimitsConfiguration `yaml:"limits"` } -// CarbonConfiguration returns the carbon configuration. -func (c *Configuration) CarbonConfiguration() *CarbonConfiguration { - if c.Carbon == nil { - // Nothing was specified in the configuration, use defaults. - return &CarbonConfiguration{ - Ingestion: &CarbonIngestionConfiguration{ - Enabled: &defaultCarbonIngesterEnabled, - ListenAddress: defaultCarbonIngesterListenAddress, - Timeout: &defaultCarbonIngesterTimeout, - }, - } - } - - if c.Carbon.Ingestion.Enabled == nil { - // If they have set any carbon configuration and haven't explicitly marked - // the ingestion as disabled, then assume they want it enabled. - c.Carbon.Ingestion.Enabled = &defaultCarbonIngesterEnabled - } - - if c.Carbon.Ingestion.ListenAddress == "" { - // If they have set any carbon configuration and haven't explicitly set - // the listen address, then assume they want to use the default one. - c.Carbon.Ingestion.ListenAddress = defaultCarbonIngesterListenAddress - } - - if c.Carbon.Ingestion.Timeout == nil { - // If they have set any carbon configuration and haven't explicitly set - // a timeout, then assume they want the default value. - c.Carbon.Ingestion.Timeout = &defaultCarbonIngesterTimeout - } - - return c.Carbon -} - // Filter is a query filter type. type Filter string @@ -179,7 +145,7 @@ type IngestConfiguration struct { // CarbonConfiguration is the configuration for the carbon server. type CarbonConfiguration struct { - Ingestion *CarbonIngestionConfiguration + Ingestion CarbonIngestionConfiguration } // CarbonIngestionConfiguration is the configuration struct for carbon ingestion. 
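The "fix cleanup code" commit above moves the cleanup() call ahead of the error check so the timeout's timer is released even when the write fails. A small illustrative program with the same ordering (a defer would give the same guarantee; the explicit call keeps the sketch close to the patch):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// writeOnce mirrors the ordering the fix establishes: cleanup runs before the
// error is inspected, so a failed write can no longer skip releasing the timer.
func writeOnce(timeout time.Duration, write func(context.Context) error) error {
	var (
		ctx     = context.Background()
		cleanup context.CancelFunc
	)
	if timeout > 0 {
		ctx, cleanup = context.WithTimeout(ctx, timeout)
	}

	err := write(ctx)
	if cleanup != nil {
		cleanup() // released whether or not the write succeeded
	}
	return err
}

func main() {
	err := writeOnce(time.Second, func(context.Context) error {
		return errors.New("downstream write failed")
	})
	fmt.Println(err) // downstream write failed
}
```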
@@ -190,6 +156,36 @@ type CarbonIngestionConfiguration struct { Timeout *time.Duration `yaml:"timeout"` } +// EnabledOrDefault returns the configured value for Enabled, if set, or the default +// value otherwise. +func (c *CarbonIngestionConfiguration) EnabledOrDefault() bool { + if c.Enabled != nil { + return *c.Enabled + } + + return defaultCarbonIngesterEnabled +} + +// TimeoutOrDefault returns the configured value for Timeout, if set, or the default +// value otherwise. +func (c *CarbonIngestionConfiguration) TimeoutOrDefault() time.Duration { + if c.Timeout != nil { + return *c.Timeout + } + + return defaultCarbonIngesterTimeout +} + +// ListenAddressOrDefault returns the configured value for ListenAddress, if set, or the default +// value otherwise. +func (c *CarbonIngestionConfiguration) ListenAddressOrDefault() string { + if c.ListenAddress != "" { + return c.ListenAddress + } + + return defaultCarbonIngesterListenAddress +} + // LocalConfiguration is the local embedded configuration if running // coordinator embedded in the DB. type LocalConfiguration struct { diff --git a/src/query/server/server.go b/src/query/server/server.go index e069e5dcfb..f34d059b5e 100644 --- a/src/query/server/server.go +++ b/src/query/server/server.go @@ -267,8 +267,8 @@ func Run(runOpts RunOptions) { logger.Info("no m3msg server configured") } - carbonConfig := cfg.CarbonConfiguration().Ingestion - if *carbonConfig.Enabled { + carbonIngestConfig := cfg.Carbon.Ingestion + if carbonIngestConfig.EnabledOrDefault() { logger.Info("carbon ingestion enabled") var ( @@ -277,12 +277,12 @@ func Run(runOpts RunOptions) { carbonWorkerPoolOpts xsync.PooledWorkerPoolOptions carbonWorkerPoolSize int ) - if carbonConfig.MaxConcurrency > 0 { + if carbonIngestConfig.MaxConcurrency > 0 { // Use a bounded worker pool if they requested a specific maximum concurrency. carbonWorkerPoolOpts = xsync.NewPooledWorkerPoolOptions(). SetGrowOnDemand(false). SetInstrumentOptions(carbonIOpts) - carbonWorkerPoolSize = carbonConfig.MaxConcurrency + carbonWorkerPoolSize = carbonIngestConfig.MaxConcurrency } else { carbonWorkerPoolOpts = xsync.NewPooledWorkerPoolOptions(). SetGrowOnDemand(true). 
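The *OrDefault accessors above keep the nil checks next to the configuration type instead of scattering them through server startup. A sketch of a unit test, in the repo's testify style, of how they are expected to behave; it is illustrative only and not part of the patch.

```go
package config

import (
	"testing"
	"time"

	"github.com/stretchr/testify/require"
)

// Illustrative test of the *OrDefault accessors: explicit values win, while
// nil or empty fields fall back to the package defaults.
func TestCarbonIngestionConfigurationDefaults(t *testing.T) {
	var cfg CarbonIngestionConfiguration
	require.True(t, cfg.EnabledOrDefault())
	require.Equal(t, defaultCarbonIngesterTimeout, cfg.TimeoutOrDefault())
	require.Equal(t, defaultCarbonIngesterListenAddress, cfg.ListenAddressOrDefault())

	disabled := false
	timeout := time.Second
	cfg = CarbonIngestionConfiguration{
		Enabled:       &disabled,
		Timeout:       &timeout,
		ListenAddress: "127.0.0.1:7204",
	}
	require.False(t, cfg.EnabledOrDefault())
	require.Equal(t, time.Second, cfg.TimeoutOrDefault())
	require.Equal(t, "127.0.0.1:7204", cfg.ListenAddressOrDefault())
}
```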
@@ -301,22 +301,25 @@ func Run(runOpts RunOptions) { ingester, err := ingestcarbon.NewIngester(carbonIngestDownsamplerAndWriter, ingestcarbon.Options{ InstrumentOptions: carbonIOpts, WorkerPool: workerPool, - Timeout: *carbonConfig.Timeout, + Timeout: carbonIngestConfig.TimeoutOrDefault(), }) if err != nil { logger.Fatal("unable to create carbon ingester", zap.Error(err)) } - serverOpts := xserver.NewOptions().SetInstrumentOptions(carbonIOpts) - carbonServer := xserver.NewServer(carbonConfig.ListenAddress, ingester, serverOpts) + var ( + serverOpts = xserver.NewOptions().SetInstrumentOptions(carbonIOpts) + carbonListenAddress = carbonIngestConfig.ListenAddressOrDefault() + carbonServer = xserver.NewServer(carbonListenAddress, ingester, serverOpts) + ) - logger.Info("starting carbon ingestion server", zap.String("listenAddress", carbonConfig.ListenAddress)) + logger.Info("starting carbon ingestion server", zap.String("listenAddress", carbonListenAddress)) err = carbonServer.ListenAndServe() if err != nil { logger.Fatal("unable to start carbon ingestion server at listen address", - zap.String("listenAddress", carbonConfig.ListenAddress), zap.Error(err)) + zap.String("listenAddress", carbonListenAddress), zap.Error(err)) } - logger.Info("started carbon ingestion server", zap.String("listenAddress", carbonConfig.ListenAddress)) + logger.Info("started carbon ingestion server", zap.String("listenAddress", carbonListenAddress)) } var interruptCh <-chan error = make(chan error) From e27716fcd8566f7b78db2a96817d8e24e02aca2a Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Thu, 24 Jan 2019 22:37:50 -0500 Subject: [PATCH 66/70] fix broken integration test --- scripts/docker-integration-tests/simple/test.sh | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/scripts/docker-integration-tests/simple/test.sh b/scripts/docker-integration-tests/simple/test.sh index a7ee35a9bb..cd78854b91 100755 --- a/scripts/docker-integration-tests/simple/test.sh +++ b/scripts/docker-integration-tests/simple/test.sh @@ -11,11 +11,11 @@ CONTAINER_NAME="m3dbnode-version-${REVISION}" docker create --name "${CONTAINER_NAME}" -p 9000:9000 -p 9001:9001 -p 9002:9002 -p 9003:9003 -p 9004:9004 -p 7201:7201 "m3dbnode_integration:${REVISION}" # think of this as a defer func() in golang -function defer { - echo "Remove docker container" - docker rm --force "${CONTAINER_NAME}" -} -trap defer EXIT +# function defer { +# echo "Remove docker container" +# docker rm --force "${CONTAINER_NAME}" +# } +# trap defer EXIT docker start "${CONTAINER_NAME}" if [ $?
-ne 0 ]; then @@ -28,7 +28,7 @@ setup_single_m3db_node echo "Write data" curl -vvvsS -X POST 0.0.0.0:9003/writetagged -d '{ - "namespace": "default", + "namespace": "unagg", "id": "foo", "tags": [ { @@ -48,7 +48,7 @@ curl -vvvsS -X POST 0.0.0.0:9003/writetagged -d '{ echo "Read data" queryResult=$(curl -sSf -X POST 0.0.0.0:9003/query -d '{ - "namespace": "default", + "namespace": "unagg", "query": { "regexp": { "field": "city", @@ -70,4 +70,4 @@ echo "Deleting placement" curl -vvvsSf -X DELETE 0.0.0.0:7201/api/v1/placement echo "Deleting namespace" -curl -vvvsSf -X DELETE 0.0.0.0:7201/api/v1/namespace/default +curl -vvvsSf -X DELETE 0.0.0.0:7201/api/v1/namespace/unagg From 51dbe03c6e8e083059aaed0b341835ec0b2b977e Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Fri, 25 Jan 2019 10:15:11 -0500 Subject: [PATCH 67/70] reduce shards in prop test --- .../bootstrap/bootstrapper/commitlog/source_prop_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/dbnode/storage/bootstrap/bootstrapper/commitlog/source_prop_test.go b/src/dbnode/storage/bootstrap/bootstrapper/commitlog/source_prop_test.go index 768e1c1da5..c8968549b3 100644 --- a/src/dbnode/storage/bootstrap/bootstrapper/commitlog/source_prop_test.go +++ b/src/dbnode/storage/bootstrap/bootstrapper/commitlog/source_prop_test.go @@ -56,7 +56,7 @@ import ( "github.com/stretchr/testify/require" ) -const maxShards = 8192 +const maxShards = 1024 const blockSize = 2 * time.Hour var ( From 379af5e466c7d6126b362b9cd61a0ab2540eca35 Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Fri, 25 Jan 2019 11:26:21 -0500 Subject: [PATCH 68/70] fix broken integration test --- .../docker-integration-tests/simple/test.sh | 103 +++++++++++++++++- 1 file changed, 97 insertions(+), 6 deletions(-) diff --git a/scripts/docker-integration-tests/simple/test.sh b/scripts/docker-integration-tests/simple/test.sh index cd78854b91..d8776a171a 100755 --- a/scripts/docker-integration-tests/simple/test.sh +++ b/scripts/docker-integration-tests/simple/test.sh @@ -11,11 +11,11 @@ CONTAINER_NAME="m3dbnode-version-${REVISION}" docker create --name "${CONTAINER_NAME}" -p 9000:9000 -p 9001:9001 -p 9002:9002 -p 9003:9003 -p 9004:9004 -p 7201:7201 "m3dbnode_integration:${REVISION}" # think of this as a defer func() in golang -# function defer { -# echo "Remove docker container" -# docker rm --force "${CONTAINER_NAME}" -# } -# trap defer EXIT +function defer { + echo "Remove docker container" + docker rm --force "${CONTAINER_NAME}" +} +trap defer EXIT docker start "${CONTAINER_NAME}" if [ $? -ne 0 ]; then @@ -24,7 +24,98 @@ if [ $? -ne 0 ]; then exit 1 fi -setup_single_m3db_node +# TODO(rartoul): Rewrite this test to use a docker-compose file like the others so that we can share all the +# DB initialization logic with the setup_single_m3db_node command in common.sh like the other files. Right now +# we can't do that because this test doesn't use the docker-compose networking so we have to specify 127.0.0.1 +# as the endpoint in the placement instead of being able to use dbnode01. 
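The replacement setup that follows polls the coordinator and dbnode endpoints with the script's retry_with_backoff helper (ATTEMPTS/TIMEOUT) until the namespaces, placement, and bootstrap report ready. For reference, the same polling idea rendered in Go; the helper and the health-check usage are illustrative, not part of the patch.

```go
package main

import (
	"errors"
	"fmt"
	"net/http"
	"time"
)

// retryWithBackoff polls check up to attempts times, doubling the wait after
// each failure, the same idea as the script's retry_with_backoff helper.
func retryWithBackoff(attempts int, initial time.Duration, check func() error) error {
	wait := initial
	var err error
	for i := 0; i < attempts; i++ {
		if err = check(); err == nil {
			return nil
		}
		time.Sleep(wait)
		wait *= 2
	}
	return fmt.Errorf("gave up after %d attempts: %w", attempts, err)
}

func main() {
	err := retryWithBackoff(10, time.Second, func() error {
		resp, err := http.Get("http://0.0.0.0:9002/health")
		if err != nil {
			return err
		}
		defer resp.Body.Close()
		if resp.StatusCode != http.StatusOK {
			return errors.New("not healthy yet")
		}
		return nil
	})
	fmt.Println(err)
}
```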
+echo "Sleeping for a bit to ensure db up" +sleep 15 # TODO Replace sleeps with logic to determine when to proceed + + echo "Adding namespace" + curl -vvvsSf -X POST 0.0.0.0:7201/api/v1/namespace -d '{ + "name": "agg", + "options": { + "bootstrapEnabled": true, + "flushEnabled": true, + "writesToCommitLog": true, + "cleanupEnabled": true, + "snapshotEnabled": true, + "repairEnabled": false, + "retentionOptions": { + "retentionPeriodDuration": "48h", + "blockSizeDuration": "2h", + "bufferFutureDuration": "10m", + "bufferPastDuration": "10m", + "blockDataExpiry": true, + "blockDataExpiryAfterNotAccessPeriodDuration": "5m" + }, + "indexOptions": { + "enabled": true, + "blockSizeDuration": "2h" + } + } + }' + + echo "Sleep until namespace is init'd" + ATTEMPTS=4 TIMEOUT=1 retry_with_backoff \ + '[ "$(curl -sSf 0.0.0.0:7201/api/v1/namespace | jq .registry.namespaces.agg.indexOptions.enabled)" == true ]' + + curl -vvvsSf -X POST 0.0.0.0:7201/api/v1/namespace -d '{ + "name": "unagg", + "options": { + "bootstrapEnabled": true, + "flushEnabled": true, + "writesToCommitLog": true, + "cleanupEnabled": true, + "snapshotEnabled": true, + "repairEnabled": false, + "retentionOptions": { + "retentionPeriodDuration": "48h", + "blockSizeDuration": "2h", + "bufferFutureDuration": "10m", + "bufferPastDuration": "10m", + "blockDataExpiry": true, + "blockDataExpiryAfterNotAccessPeriodDuration": "5m" + }, + "indexOptions": { + "enabled": true, + "blockSizeDuration": "2h" + } + } + }' + + echo "Sleep until namespace is init'd" + ATTEMPTS=4 TIMEOUT=1 retry_with_backoff \ + '[ "$(curl -sSf 0.0.0.0:7201/api/v1/namespace | jq .registry.namespaces.unagg.indexOptions.enabled)" == true ]' + + echo "Placement initialization" + curl -vvvsSf -X POST 0.0.0.0:7201/api/v1/placement/init -d '{ + "num_shards": 64, + "replication_factor": 1, + "instances": [ + { + "id": "m3db_local", + "isolation_group": "rack-a", + "zone": "embedded", + "weight": 1024, + "endpoint": "127.0.0.1::9000", + "hostname": "127.0.0.1:", + "port": 9000 + } + ] + }' + + echo "Sleep until placement is init'd" + ATTEMPTS=4 TIMEOUT=1 retry_with_backoff \ + '[ "$(curl -sSf 0.0.0.0:7201/api/v1/placement | jq .placement.instances.m3db_local.id)" == \"m3db_local\" ]' + + echo "Sleep until bootstrapped" + ATTEMPTS=10 TIMEOUT=2 retry_with_backoff \ + '[ "$(curl -sSf 0.0.0.0:9002/health | jq .bootstrapped)" == true ]' + + echo "Waiting until shards are marked as available" + ATTEMPTS=10 TIMEOUT=1 retry_with_backoff \ + '[ "$(curl -sSf 0.0.0.0:7201/api/v1/placement | grep -c INITIALIZING)" -eq 0 ]' echo "Write data" curl -vvvsS -X POST 0.0.0.0:9003/writetagged -d '{ From b9ff591a0477d2e3289c3b797eb0c55ff005a3e9 Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Fri, 25 Jan 2019 12:46:23 -0500 Subject: [PATCH 69/70] close readers when done --- .../storage/bootstrap/bootstrapper/commitlog/source.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/dbnode/storage/bootstrap/bootstrapper/commitlog/source.go b/src/dbnode/storage/bootstrap/bootstrapper/commitlog/source.go index 53aacb42e2..51a72deb1c 100644 --- a/src/dbnode/storage/bootstrap/bootstrapper/commitlog/source.go +++ b/src/dbnode/storage/bootstrap/bootstrapper/commitlog/source.go @@ -630,6 +630,14 @@ func (s *commitLogSource) bootstrapShardBlockSnapshot( if err != nil { return shardResult, err } + defer func() { + err := reader.Close() + if err != nil { + s.log.Errorf( + "error closing reader for shard: %d and blockstart: %s and volume: %d, err: %s", + shard, blockStart.String(), 
mostRecentCompleteSnapshot.ID.VolumeIndex, err.Error()) + } + }() s.log.Debugf( "reading snapshot for shard: %d and blockStart: %s and volume: %d", From 29c5a88bcb0f45a59c6706262b69f1123a1a89e5 Mon Sep 17 00:00:00 2001 From: Richard Artoul Date: Fri, 25 Jan 2019 12:54:51 -0500 Subject: [PATCH 70/70] fix unit test --- .../storage/bootstrap/bootstrapper/commitlog/source_data_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/src/dbnode/storage/bootstrap/bootstrapper/commitlog/source_data_test.go b/src/dbnode/storage/bootstrap/bootstrapper/commitlog/source_data_test.go index 6781148d7f..afc2358c27 100644 --- a/src/dbnode/storage/bootstrap/bootstrapper/commitlog/source_data_test.go +++ b/src/dbnode/storage/bootstrap/bootstrapper/commitlog/source_data_test.go @@ -340,6 +340,7 @@ func TestItMergesSnapshotsAndCommitLogs(t *testing.T) { FileSetType: persist.FileSetSnapshotType, }).Return(nil).AnyTimes() mockReader.EXPECT().Entries().Return(1).AnyTimes() + mockReader.EXPECT().Close().Return(nil).AnyTimes() snapshotValues := []testValue{ {foo, start.Add(1 * time.Minute), 1.0, xtime.Nanosecond, nil},